LCOV - code coverage report
Current view: top level - lkbce/include/linux - swap.h (source / functions)            Hit    Total   Coverage
Test:         coverage.info                                              Lines:           1        1    100.0 %
Date:         2017-01-25                                                 Functions:       0        0          -

          Line data    Source code
       1             : #ifndef _LINUX_SWAP_H
       2             : #define _LINUX_SWAP_H
       3             : 
       4             : #include <linux/spinlock.h>
       5             : #include <linux/linkage.h>
       6             : #include <linux/mmzone.h>
       7             : #include <linux/list.h>
       8             : #include <linux/memcontrol.h>
       9             : #include <linux/sched.h>
      10             : #include <linux/node.h>
      11             : 
      12             : #include <asm/atomic.h>
      13             : #include <asm/page.h>
      14             : 
      15             : struct notifier_block;
      16             : 
      17             : struct bio;
      18             : 
      19             : #define SWAP_FLAG_PREFER        0x8000  /* set if swap priority specified */
      20             : #define SWAP_FLAG_PRIO_MASK     0x7fff
      21             : #define SWAP_FLAG_PRIO_SHIFT    0
      22             : 
      23             : static inline int current_is_kswapd(void)
      24             : {
      25             :         return current->flags & PF_KSWAPD;
      26             : }
      27             : 
      28             : /*
      29             :  * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
      30             :  * be swapped to.  The swap type and the offset into that swap type are
      31             :  * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
      32             :  * for the type means that the maximum number of swapcache pages is 27 bits
      33             :  * on 32-bit-pgoff_t architectures.  And that assumes that the architecture packs
      34             :  * the type/offset into the pte as 5/27 as well.
      35             :  */
      36             : #define MAX_SWAPFILES_SHIFT     5
      37             : 
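As a rough illustration of the 5/27 split described above (the real packing is architecture-specific and lives in the per-arch page-table headers and in linux/swapops.h; the EX_* names here are made up for the example):

        /* Hypothetical 32-bit layout: 5 high bits of swap type, 27 low bits of offset. */
        #define EX_SWP_TYPE_SHIFT   27
        #define EX_SWP_OFFSET_MASK  ((1UL << EX_SWP_TYPE_SHIFT) - 1)

        /* e.g. type 3, offset 0x1234  ->  (3 << 27) | 0x1234 */
        #define EX_SWP_ENTRY(type, off) \
                (((unsigned long)(type) << EX_SWP_TYPE_SHIFT) | ((off) & EX_SWP_OFFSET_MASK))
        #define EX_SWP_TYPE(val)        ((val) >> EX_SWP_TYPE_SHIFT)
        #define EX_SWP_OFFSET(val)      ((val) & EX_SWP_OFFSET_MASK)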
      38             : /*
       39             :  * Use some of the swap file numbers for other purposes. This
      40             :  * is a convenient way to hook into the VM to trigger special
      41             :  * actions on faults.
      42             :  */
      43             : 
      44             : /*
      45             :  * NUMA node memory migration support
      46             :  */
      47             : #ifdef CONFIG_MIGRATION
      48             : #define SWP_MIGRATION_NUM 2
      49             : #define SWP_MIGRATION_READ      (MAX_SWAPFILES + SWP_HWPOISON_NUM)
      50             : #define SWP_MIGRATION_WRITE     (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
      51             : #else
      52             : #define SWP_MIGRATION_NUM 0
      53             : #endif
      54             : 
      55             : /*
      56             :  * Handling of hardware poisoned pages with memory corruption.
      57             :  */
      58             : #ifdef CONFIG_MEMORY_FAILURE
      59             : #define SWP_HWPOISON_NUM 1
      60             : #define SWP_HWPOISON            MAX_SWAPFILES
      61             : #else
      62             : #define SWP_HWPOISON_NUM 0
      63             : #endif
      64             : 
      65             : #define MAX_SWAPFILES \
      66             :         ((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
      67             : 
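A quick sanity check of the arithmetic, assuming both CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE are enabled:

        /*
         * (1 << MAX_SWAPFILES_SHIFT) = 32 encodable swap types
         *   - SWP_MIGRATION_NUM (2) - SWP_HWPOISON_NUM (1)
         *   => MAX_SWAPFILES == 29 usable swap areas, with
         *      SWP_HWPOISON == 29, SWP_MIGRATION_READ == 30, SWP_MIGRATION_WRITE == 31.
         * With both options disabled, all 32 types are ordinary swap areas.
         */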
      68             : /*
      69             :  * Magic header for a swap area. The first part of the union is
      70             :  * what the swap magic looks like for the old (limited to 128MB)
      71             :  * swap area format, the second part of the union adds - in the
      72             :  * old reserved area - some extra information. Note that the first
      73             :  * kilobyte is reserved for boot loader or disk label stuff...
      74             :  *
      75             :  * Having the magic at the end of the PAGE_SIZE makes detecting swap
      76             :  * areas somewhat tricky on machines that support multiple page sizes.
      77             :  * For 2.5 we'll probably want to move the magic to just beyond the
      78             :  * bootbits...
      79             :  */
      80             : union swap_header {
      81             :         struct {
      82             :                 char reserved[PAGE_SIZE - 10];
      83             :                 char magic[10];                 /* SWAP-SPACE or SWAPSPACE2 */
      84             :         } magic;
      85             :         struct {
      86             :                 char            bootbits[1024]; /* Space for disklabel etc. */
      87             :                 __u32           version;
      88             :                 __u32           last_page;
      89             :                 __u32           nr_badpages;
      90             :                 unsigned char   sws_uuid[16];
      91             :                 unsigned char   sws_volume[16];
      92             :                 __u32           padding[117];
      93             :                 __u32           badpages[1];
      94             :         } info;
      95             : };
      96             : 
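Because the signature occupies the last ten bytes of the first page, detecting a swap area only requires looking there. A minimal sketch (example_is_swap_header() is hypothetical and not part of this header; the kernel's own check happens during swapon in mm/swapfile.c):

        #include <linux/string.h>

        /* Illustrative only: recognise the two on-disk swap signatures. */
        static inline int example_is_swap_header(const union swap_header *hdr)
        {
                return !memcmp(hdr->magic.magic, "SWAPSPACE2", 10) ||
                       !memcmp(hdr->magic.magic, "SWAP-SPACE", 10);
        }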
      97             :  /* A swap entry has to fit into a "unsigned long", as
      98             :   * the entry is hidden in the "index" field of the
      99             :   * swapper address space.
     100             :   */
     101             : typedef struct {
     102             :         unsigned long val;
     103             : } swp_entry_t;
     104           1 : 
     105             : /*
     106             :  * current->reclaim_state points to one of these when a task is running
     107             :  * memory reclaim
     108             :  */
     109             : struct reclaim_state {
     110             :         unsigned long reclaimed_slab;
     111             : };
     112             : 
     113             : #ifdef __KERNEL__
     114             : 
     115             : struct address_space;
     116             : struct sysinfo;
     117             : struct writeback_control;
     118             : struct zone;
     119             : 
     120             : /*
     121             :  * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
     122             :  * disk blocks.  A list of swap extents maps the entire swapfile.  (Where the
     123             :  * term `swapfile' refers to either a blockdevice or an IS_REG file.  Apart
      124             :  * from setup, they're handled identically.)
     125             :  *
     126             :  * We always assume that blocks are of size PAGE_SIZE.
     127             :  */
     128             : struct swap_extent {
     129             :         struct list_head list;
     130             :         pgoff_t start_page;
     131             :         pgoff_t nr_pages;
     132             :         sector_t start_block;
     133             : };
     134             : 
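The mapping an extent expresses is plain offset arithmetic. A hedged sketch of the core step (the real lookup, which also walks the extent list, is map_swap_page() in mm/swapfile.c; example_extent_block() is invented for illustration):

        /* Illustrative only: assumes page 'offset' already falls inside extent 'se'. */
        static inline sector_t example_extent_block(const struct swap_extent *se,
                                                    pgoff_t offset)
        {
                return se->start_block + (offset - se->start_page);
        }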
     135             : /*
     136             :  * Max bad pages in the new format..
     137             :  */
     138             : #define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
     139             : #define MAX_SWAP_BADPAGES \
     140             :         ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
     141             : 
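Concretely, assuming 4 KiB pages and 4-byte __u32 fields: magic.magic starts at byte 4096 - 10 = 4086 and info.badpages at 1024 + 3*4 + 16 + 16 + 117*4 = 1536, so MAX_SWAP_BADPAGES works out to (4086 - 1536) / 4 = 637 bad-page slots.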
     142             : enum {
     143             :         SWP_USED        = (1 << 0),       /* is slot in swap_info[] used? */
     144             :         SWP_WRITEOK     = (1 << 1),       /* ok to write to this swap?    */
     145             :         SWP_DISCARDABLE = (1 << 2),       /* blkdev supports discard */
     146             :         SWP_DISCARDING  = (1 << 3),       /* now discarding a free cluster */
     147             :         SWP_SOLIDSTATE  = (1 << 4),       /* blkdev seeks are cheap */
     148             :         SWP_CONTINUED   = (1 << 5),       /* swap_map has count continuation */
     149             :                                         /* add others here before... */
     150             :         SWP_SCANNING    = (1 << 8),       /* refcount in scan_swap_map */
     151             : };
     152             : 
     153             : #define SWAP_CLUSTER_MAX 32
     154             : 
     155             : #define SWAP_MAP_MAX    0x3e    /* Max duplication count, in first swap_map */
     156             : #define SWAP_MAP_BAD    0x3f    /* Note pageblock is bad, in first swap_map */
     157             : #define SWAP_HAS_CACHE  0x40    /* Flag page is cached, in first swap_map */
     158             : #define SWAP_CONT_MAX   0x7f    /* Max count, in each swap_map continuation */
     159             : #define COUNT_CONTINUED 0x80    /* See swap_map continuation for full count */
     160             : #define SWAP_MAP_SHMEM  0xbf    /* Owned by shmem/tmpfs, in first swap_map */
     161             : 
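A hedged reading of the byte layout these constants imply for an entry in the first page of swap_map (the authoritative counting logic is in mm/swapfile.c):

        /*
         *   bit 7    COUNT_CONTINUED - the full count continues in a continuation page
         *   bit 6    SWAP_HAS_CACHE  - the slot is also present in the swap cache
         *   bits 0-5 duplication count, up to SWAP_MAP_MAX (0x3e);
         *            0x3f (SWAP_MAP_BAD) marks an unusable slot
         *
         * SWAP_MAP_SHMEM (0xbf) is numerically COUNT_CONTINUED | SWAP_MAP_BAD and,
         * per the comment above, marks slots owned by shmem/tmpfs.
         */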
     162             : /*
     163             :  * The in-memory structure used to track swap areas.
     164             :  */
     165             : struct swap_info_struct {
     166             :         unsigned long   flags;          /* SWP_USED etc: see above */
     167             :         signed short    prio;           /* swap priority of this type */
     168             :         signed char     type;           /* strange name for an index */
     169             :         signed char     next;           /* next type on the swap list */
     170             :         unsigned int    max;            /* extent of the swap_map */
     171             :         unsigned char *swap_map;        /* vmalloc'ed array of usage counts */
     172             :         unsigned int lowest_bit;        /* index of first free in swap_map */
     173             :         unsigned int highest_bit;       /* index of last free in swap_map */
     174             :         unsigned int pages;             /* total of usable pages of swap */
     175             :         unsigned int inuse_pages;       /* number of those currently in use */
     176             :         unsigned int cluster_next;      /* likely index for next allocation */
     177             :         unsigned int cluster_nr;        /* countdown to next cluster search */
     178             :         unsigned int lowest_alloc;      /* while preparing discard cluster */
     179             :         unsigned int highest_alloc;     /* while preparing discard cluster */
     180             :         struct swap_extent *curr_swap_extent;
     181             :         struct swap_extent first_swap_extent;
     182             :         struct block_device *bdev;      /* swap device or bdev of swap file */
     183             :         struct file *swap_file;         /* seldom referenced */
     184             :         unsigned int old_block_size;    /* seldom referenced */
     185             : };
     186             : 
     187             : struct swap_list_t {
     188             :         int head;       /* head of priority-ordered swapfile list */
     189             :         int next;       /* swapfile to be used next */
     190             : };
     191             : 
     192             : /* Swap 50% full? Release swapcache more aggressively.. */
     193             : #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
     194             : 
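To make the threshold concrete: with total_swap_pages == 1000, vm_swap_full() becomes true once nr_swap_pages (the count of free swap slots) drops below 500, e.g. 400 * 2 < 1000, at which point swap cache is released more aggressively as the comment says.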
     195             : /* linux/mm/page_alloc.c */
     196             : extern unsigned long totalram_pages;
     197             : extern unsigned long totalreserve_pages;
     198             : extern unsigned int nr_free_buffer_pages(void);
     199             : extern unsigned int nr_free_pagecache_pages(void);
     200             : 
     201             : /* Definition of global_page_state not available yet */
     202             : #define nr_free_pages() global_page_state(NR_FREE_PAGES)
     203             : 
     204             : 
     205             : /* linux/mm/swap.c */
     206             : extern void __lru_cache_add(struct page *, enum lru_list lru);
     207             : extern void lru_cache_add_lru(struct page *, enum lru_list lru);
     208             : extern void activate_page(struct page *);
     209             : extern void mark_page_accessed(struct page *);
     210             : extern void lru_add_drain(void);
     211             : extern int lru_add_drain_all(void);
     212             : extern void rotate_reclaimable_page(struct page *page);
     213             : extern void swap_setup(void);
     214             : 
     215             : extern void add_page_to_unevictable_list(struct page *page);
     216             : 
     217             : /**
      218             :  * lru_cache_add_anon: add a page to the inactive anon list
     219             :  * @page: the page to add
     220             :  */
     221             : static inline void lru_cache_add_anon(struct page *page)
     222             : {
     223             :         __lru_cache_add(page, LRU_INACTIVE_ANON);
     224             : }
     225             : 
     226             : static inline void lru_cache_add_file(struct page *page)
     227             : {
     228             :         __lru_cache_add(page, LRU_INACTIVE_FILE);
     229             : }
     230             : 
     231             : /* linux/mm/vmscan.c */
     232             : extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
     233             :                                         gfp_t gfp_mask, nodemask_t *mask);
     234             : extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
     235             :                                                   gfp_t gfp_mask, bool noswap,
     236             :                                                   unsigned int swappiness);
     237             : extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
     238             :                                                 gfp_t gfp_mask, bool noswap,
     239             :                                                 unsigned int swappiness,
     240             :                                                 struct zone *zone,
     241             :                                                 int nid);
     242             : extern int __isolate_lru_page(struct page *page, int mode, int file);
     243             : extern unsigned long shrink_all_memory(unsigned long nr_pages);
     244             : extern int vm_swappiness;
     245             : extern int remove_mapping(struct address_space *mapping, struct page *page);
     246             : extern long vm_total_pages;
     247             : 
     248             : #ifdef CONFIG_NUMA
     249             : extern int zone_reclaim_mode;
     250             : extern int sysctl_min_unmapped_ratio;
     251             : extern int sysctl_min_slab_ratio;
     252             : extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
     253             : #else
     254             : #define zone_reclaim_mode 0
     255             : static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
     256             : {
     257             :         return 0;
     258             : }
     259             : #endif
     260             : 
     261             : extern int page_evictable(struct page *page, struct vm_area_struct *vma);
     262             : extern void scan_mapping_unevictable_pages(struct address_space *);
     263             : 
     264             : extern unsigned long scan_unevictable_pages;
     265             : extern int scan_unevictable_handler(struct ctl_table *, int,
     266             :                                         void __user *, size_t *, loff_t *);
     267             : extern int scan_unevictable_register_node(struct node *node);
     268             : extern void scan_unevictable_unregister_node(struct node *node);
     269             : 
     270             : extern int kswapd_run(int nid);
     271             : extern void kswapd_stop(int nid);
     272             : 
     273             : #ifdef CONFIG_MMU
     274             : /* linux/mm/shmem.c */
     275             : extern int shmem_unuse(swp_entry_t entry, struct page *page);
     276             : #endif /* CONFIG_MMU */
     277             : 
     278             : extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
     279             : 
     280             : #ifdef CONFIG_SWAP
     281             : /* linux/mm/page_io.c */
     282             : extern int swap_readpage(struct page *);
     283             : extern int swap_writepage(struct page *page, struct writeback_control *wbc);
     284             : extern void end_swap_bio_read(struct bio *bio, int err);
     285             : 
     286             : /* linux/mm/swap_state.c */
     287             : extern struct address_space swapper_space;
     288             : #define total_swapcache_pages  swapper_space.nrpages
     289             : extern void show_swap_cache_info(void);
     290             : extern int add_to_swap(struct page *);
     291             : extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
     292             : extern void __delete_from_swap_cache(struct page *);
     293             : extern void delete_from_swap_cache(struct page *);
     294             : extern void free_page_and_swap_cache(struct page *);
     295             : extern void free_pages_and_swap_cache(struct page **, int);
     296             : extern struct page *lookup_swap_cache(swp_entry_t);
     297             : extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
     298             :                         struct vm_area_struct *vma, unsigned long addr);
     299             : extern struct page *swapin_readahead(swp_entry_t, gfp_t,
     300             :                         struct vm_area_struct *vma, unsigned long addr);
     301             : 
     302             : /* linux/mm/swapfile.c */
     303             : extern long nr_swap_pages;
     304             : extern long total_swap_pages;
     305             : extern void si_swapinfo(struct sysinfo *);
     306             : extern swp_entry_t get_swap_page(void);
     307             : extern swp_entry_t get_swap_page_of_type(int);
     308             : extern int valid_swaphandles(swp_entry_t, unsigned long *);
     309             : extern int add_swap_count_continuation(swp_entry_t, gfp_t);
     310             : extern void swap_shmem_alloc(swp_entry_t);
     311             : extern int swap_duplicate(swp_entry_t);
     312             : extern int swapcache_prepare(swp_entry_t);
     313             : extern void swap_free(swp_entry_t);
     314             : extern void swapcache_free(swp_entry_t, struct page *page);
     315             : extern int free_swap_and_cache(swp_entry_t);
     316             : extern int swap_type_of(dev_t, sector_t, struct block_device **);
     317             : extern unsigned int count_swap_pages(int, int);
     318             : extern sector_t map_swap_page(struct page *, struct block_device **);
     319             : extern sector_t swapdev_block(int, pgoff_t);
     320             : extern int reuse_swap_page(struct page *);
     321             : extern int try_to_free_swap(struct page *);
     322             : struct backing_dev_info;
     323             : 
     324             : /* linux/mm/thrash.c */
     325             : extern struct mm_struct *swap_token_mm;
     326             : extern void grab_swap_token(struct mm_struct *);
     327             : extern void __put_swap_token(struct mm_struct *);
     328             : 
     329             : static inline int has_swap_token(struct mm_struct *mm)
     330             : {
     331             :         return (mm == swap_token_mm);
     332             : }
     333             : 
     334             : static inline void put_swap_token(struct mm_struct *mm)
     335             : {
     336             :         if (has_swap_token(mm))
     337             :                 __put_swap_token(mm);
     338             : }
     339             : 
     340             : static inline void disable_swap_token(void)
     341             : {
     342             :         put_swap_token(swap_token_mm);
     343             : }
     344             : 
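For context, a hedged sketch of how these helpers fit together (the token logic itself lives in mm/thrash.c; example_mm_is_favoured() is invented for the example). At most one mm_struct holds the global swap token at a time, and has_swap_token() simply reports whether a given mm is the current holder:

        /* Illustrative only: compete for the token and report whether we hold it. */
        static int example_mm_is_favoured(struct mm_struct *mm)
        {
                grab_swap_token(mm);            /* may or may not hand us the token */
                return has_swap_token(mm);
        }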
     345             : #ifdef CONFIG_CGROUP_MEM_RES_CTLR
     346             : extern void
     347             : mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
     348             : #else
     349             : static inline void
     350             : mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
     351             : {
     352             : }
     353             : #endif
     354             : #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
     355             : extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
     356             : #else
     357             : static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
     358             : {
     359             : }
     360             : #endif
     361             : 
     362             : #else /* CONFIG_SWAP */
     363             : 
     364             : #define nr_swap_pages                           0L
     365             : #define total_swap_pages                        0L
     366             : #define total_swapcache_pages                   0UL
     367             : 
     368             : #define si_swapinfo(val) \
     369             :         do { (val)->freeswap = (val)->totalswap = 0; } while (0)
     370             : /* only sparc can not include linux/pagemap.h in this file
     371             :  * so leave page_cache_release and release_pages undeclared... */
     372             : #define free_page_and_swap_cache(page) \
     373             :         page_cache_release(page)
     374             : #define free_pages_and_swap_cache(pages, nr) \
     375             :         release_pages((pages), (nr), 0);
     376             : 
     377             : static inline void show_swap_cache_info(void)
     378             : {
     379             : }
     380             : 
     381             : #define free_swap_and_cache(swp)        is_migration_entry(swp)
     382             : #define swapcache_prepare(swp)          is_migration_entry(swp)
     383             : 
     384             : static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
     385             : {
     386             :         return 0;
     387             : }
     388             : 
     389             : static inline void swap_shmem_alloc(swp_entry_t swp)
     390             : {
     391             : }
     392             : 
     393             : static inline int swap_duplicate(swp_entry_t swp)
     394             : {
     395             :         return 0;
     396             : }
     397             : 
     398             : static inline void swap_free(swp_entry_t swp)
     399             : {
     400             : }
     401             : 
     402             : static inline void swapcache_free(swp_entry_t swp, struct page *page)
     403             : {
     404             : }
     405             : 
     406             : static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
     407             :                         struct vm_area_struct *vma, unsigned long addr)
     408             : {
     409             :         return NULL;
     410             : }
     411             : 
     412             : static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
     413             : {
     414             :         return 0;
     415             : }
     416             : 
     417             : static inline struct page *lookup_swap_cache(swp_entry_t swp)
     418             : {
     419             :         return NULL;
     420             : }
     421             : 
     422             : static inline int add_to_swap(struct page *page)
     423             : {
     424             :         return 0;
     425             : }
     426             : 
     427             : static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
     428             :                                                         gfp_t gfp_mask)
     429             : {
     430             :         return -1;
     431             : }
     432             : 
     433             : static inline void __delete_from_swap_cache(struct page *page)
     434             : {
     435             : }
     436             : 
     437             : static inline void delete_from_swap_cache(struct page *page)
     438             : {
     439             : }
     440             : 
     441             : #define reuse_swap_page(page)   (page_mapcount(page) == 1)
     442             : 
     443             : static inline int try_to_free_swap(struct page *page)
     444             : {
     445             :         return 0;
     446             : }
     447             : 
     448             : static inline swp_entry_t get_swap_page(void)
     449             : {
     450             :         swp_entry_t entry;
     451             :         entry.val = 0;
     452             :         return entry;
     453             : }
     454             : 
     455             : /* linux/mm/thrash.c */
     456             : static inline void put_swap_token(struct mm_struct *mm)
     457             : {
     458             : }
     459             : 
     460             : static inline void grab_swap_token(struct mm_struct *mm)
     461             : {
     462             : }
     463             : 
     464             : static inline int has_swap_token(struct mm_struct *mm)
     465             : {
     466             :         return 0;
     467             : }
     468             : 
     469             : static inline void disable_swap_token(void)
     470             : {
     471             : }
     472             : 
     473             : static inline void
      474             : mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
     475             : {
     476             : }
     477             : 
     478             : #endif /* CONFIG_SWAP */
     479             : #endif /* __KERNEL__*/
     480             : #endif /* _LINUX_SWAP_H */

Generated by: LCOV version 1.10