LCOV - code coverage report
Current view: top level - include/linux - buffer_head.h
Test: coverage.info
Date: 2017-01-25

                 Hit    Total    Coverage
Lines:             1        1     100.0 %
Functions:         0        0           -

          Line data    Source code
       1             : /*
       2             :  * include/linux/buffer_head.h
       3             :  *
       4             :  * Everything to do with buffer_heads.
       5             :  */
       6             : 
       7             : #ifndef _LINUX_BUFFER_HEAD_H
       8             : #define _LINUX_BUFFER_HEAD_H
       9             : 
      10             : #include <linux/types.h>
      11             : #include <linux/fs.h>
      12             : #include <linux/linkage.h>
      13             : #include <linux/pagemap.h>
      14             : #include <linux/wait.h>
      15             : #include <asm/atomic.h>
      16             : 
      17             : #ifdef CONFIG_BLOCK
      18             : 
      19             : enum bh_state_bits {
      20             :         BH_Uptodate,    /* Contains valid data */
      21             :         BH_Dirty,       /* Is dirty */
      22             :         BH_Lock,        /* Is locked */
      23             :         BH_Req,         /* Has been submitted for I/O */
      24             :         BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
      25             :                           * IO completion of other buffers in the page
      26             :                           */
      27             : 
      28             :         BH_Mapped,      /* Has a disk mapping */
      29             :         BH_New,         /* Disk mapping was newly created by get_block */
      30             :         BH_Async_Read,  /* Is under end_buffer_async_read I/O */
      31             :         BH_Async_Write, /* Is under end_buffer_async_write I/O */
      32             :         BH_Delay,       /* Buffer is not yet allocated on disk */
      33             :         BH_Boundary,    /* Block is followed by a discontiguity */
      34             :         BH_Write_EIO,   /* I/O error on write */
      35             :         BH_Ordered,     /* ordered write */
      36             :         BH_Eopnotsupp,  /* operation not supported (barrier) */
      37             :         BH_Unwritten,   /* Buffer is allocated on disk but not written */
      38             :         BH_Quiet,       /* Buffer Error printks to be quiet */
      39             : 
      40             :         BH_PrivateStart,/* not a state bit, but the first bit available
      41             :                          * for private allocation by other entities
      42             :                          */
      43             : };
      44             : 
      45             : #define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
      46             : 
      47             : struct page;
      48             : struct buffer_head;
      49             : struct address_space;
      50             : typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
      51             : 
      52             : /*
      53             :  * Historically, a buffer_head was used to map a single block
      54             :  * within a page, and of course as the unit of I/O through the
      55             :  * filesystem and block layers.  Nowadays the basic I/O unit
      56             :  * is the bio, and buffer_heads are used for extracting block
      57             :  * mappings (via a get_block_t call), for tracking state within
      58             :  * a page (via a page_mapping) and for wrapping bio submission
      59             :  * for backward compatibility reasons (e.g. submit_bh).
      60             :  */
      61             : struct buffer_head {
      62             :         unsigned long b_state;          /* buffer state bitmap (see above) */
      63             :         struct buffer_head *b_this_page;/* circular list of page's buffers */
      64             :         struct page *b_page;            /* the page this bh is mapped to */
      65             : 
      66             :         sector_t b_blocknr;             /* start block number */
      67             :         size_t b_size;                  /* size of mapping */
      68             :         char *b_data;                   /* pointer to data within the page */
      69             : 
      70             :         struct block_device *b_bdev;
      71             :         bh_end_io_t *b_end_io;          /* I/O completion */
      72             :         void *b_private;                /* reserved for b_end_io */
      73             :         struct list_head b_assoc_buffers; /* associated with another mapping */
      74             :         struct address_space *b_assoc_map;      /* mapping this buffer is
      75             :                                                    associated with */
      76             :         atomic_t b_count;               /* users using this buffer_head */
      77             : };
      78             : 
      79             : /*
      80             :  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
      81             :  * and buffer_foo() functions.
      82             :  */
      83             : #define BUFFER_FNS(bit, name)                                           \
      84             : static inline void set_buffer_##name(struct buffer_head *bh)            \
      85             : {                                                                       \
      86             :         set_bit(BH_##bit, &(bh)->b_state);                               \
      87             : }                                                                       \
      88             : static inline void clear_buffer_##name(struct buffer_head *bh)          \
      89             : {                                                                       \
      90             :         clear_bit(BH_##bit, &(bh)->b_state);                             \
      91             : }                                                                       \
      92             : static inline int buffer_##name(const struct buffer_head *bh)           \
      93             : {                                                                       \
      94             :         return test_bit(BH_##bit, &(bh)->b_state);                       \
      95             : }
      96             : 
      97             : /*
      98             :  * test_set_buffer_foo() and test_clear_buffer_foo()
      99             :  */
     100             : #define TAS_BUFFER_FNS(bit, name)                                       \
     101             : static inline int test_set_buffer_##name(struct buffer_head *bh)        \
     102             : {                                                                       \
     103             :         return test_and_set_bit(BH_##bit, &(bh)->b_state);               \
     104             : }                                                                       \
     105             : static inline int test_clear_buffer_##name(struct buffer_head *bh)      \
     106             : {                                                                       \
     107             :         return test_and_clear_bit(BH_##bit, &(bh)->b_state);             \
     108             : }                                                                       \
     109             : 
     110             : /*
     111             :  * Emit the buffer bitops functions.   Note that there are also functions
     112             :  * of the form "mark_buffer_foo()".  These are higher-level functions which
     113             :  * do something in addition to setting a b_state bit.
     114             :  */
     115             : BUFFER_FNS(Uptodate, uptodate)
     116             : BUFFER_FNS(Dirty, dirty)
     117             : TAS_BUFFER_FNS(Dirty, dirty)
     118             : BUFFER_FNS(Lock, locked)
     119             : BUFFER_FNS(Req, req)
     120             : TAS_BUFFER_FNS(Req, req)
     121             : BUFFER_FNS(Mapped, mapped)
     122             : BUFFER_FNS(New, new)
     123             : BUFFER_FNS(Async_Read, async_read)
     124             : BUFFER_FNS(Async_Write, async_write)
     125             : BUFFER_FNS(Delay, delay)
     126             : BUFFER_FNS(Boundary, boundary)
     127             : BUFFER_FNS(Write_EIO, write_io_error)
     128             : BUFFER_FNS(Ordered, ordered)
     129             : BUFFER_FNS(Eopnotsupp, eopnotsupp)
     130             : BUFFER_FNS(Unwritten, unwritten)
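
As an illustration of what the BUFFER_FNS() macro above generates (a sketch of the expansion, not text from the header itself), BUFFER_FNS(Dirty, dirty) produces:

    /* Expansion of BUFFER_FNS(Dirty, dirty) from the macro definition above. */
    static inline void set_buffer_dirty(struct buffer_head *bh)
    {
            set_bit(BH_Dirty, &(bh)->b_state);
    }
    static inline void clear_buffer_dirty(struct buffer_head *bh)
    {
            clear_bit(BH_Dirty, &(bh)->b_state);
    }
    static inline int buffer_dirty(const struct buffer_head *bh)
    {
            return test_bit(BH_Dirty, &(bh)->b_state);
    }

TAS_BUFFER_FNS(Dirty, dirty) additionally emits test_set_buffer_dirty() and test_clear_buffer_dirty(), which return the previous value of the bit for callers that need it.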
     131             : 
     132             : #define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)
     133             : #define touch_buffer(bh)        mark_page_accessed(bh->b_page)
     134             : 
     135             : /* If we *know* page->private refers to buffer_heads */
     136             : #define page_buffers(page)                                      \
     137             :         ({                                                      \
     138             :                 BUG_ON(!PagePrivate(page));                     \
     139             :                 ((struct buffer_head *)page_private(page));     \
     140             :         })
     141             : #define page_has_buffers(page)  PagePrivate(page)
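
The usual pairing of these two macros with the b_this_page ring is shown in the following sketch (the helper name is hypothetical, not part of the header):

    /* Visit every buffer_head attached to a page, if it has any.
     * page_buffers() may only be used once page_has_buffers() is true. */
    static void example_for_each_bh(struct page *page)
    {
            struct buffer_head *head, *bh;

            if (!page_has_buffers(page))
                    return;
            head = page_buffers(page);
            bh = head;
            do {
                    if (buffer_dirty(bh))
                            ;       /* e.g. account for or write out this block */
                    bh = bh->b_this_page;   /* circular list, see struct buffer_head */
            } while (bh != head);
    }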
     142             : 
     143             : /*
     144             :  * Declarations
     145             :  */
     146             : 
     147             : void mark_buffer_dirty(struct buffer_head *bh);
     148             : void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
     149             : void set_bh_page(struct buffer_head *bh,
     150             :                 struct page *page, unsigned long offset);
     151             : int try_to_free_buffers(struct page *);
     152             : struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
     153             :                 int retry);
     154             : void create_empty_buffers(struct page *, unsigned long,
     155             :                         unsigned long b_state);
     156             : void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
     157             : void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
     158             : void end_buffer_async_write(struct buffer_head *bh, int uptodate);
     159             : 
     160             : /* Things to do with buffers at mapping->private_list */
     161             : void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
     162             : int inode_has_buffers(struct inode *);
     163             : void invalidate_inode_buffers(struct inode *);
     164             : int remove_inode_buffers(struct inode *inode);
     165             : int sync_mapping_buffers(struct address_space *mapping);
     166             : void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
     167             : 
     168             : void mark_buffer_async_write(struct buffer_head *bh);
     169             : void __wait_on_buffer(struct buffer_head *);
     170             : wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
     171             : struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
     172             :                         unsigned size);
     173             : struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
     174             :                         unsigned size);
     175             : void __brelse(struct buffer_head *);
     176             : void __bforget(struct buffer_head *);
     177             : void __breadahead(struct block_device *, sector_t block, unsigned int size);
     178             : struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
     179             : void invalidate_bh_lrus(void);
     180             : struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
     181             : void free_buffer_head(struct buffer_head * bh);
     182             : void unlock_buffer(struct buffer_head *bh);
     183             : void __lock_buffer(struct buffer_head *bh);
     184             : void ll_rw_block(int, int, struct buffer_head * bh[]);
     185             : int sync_dirty_buffer(struct buffer_head *bh);
     186             : int submit_bh(int, struct buffer_head *);
     187             : void write_boundary_block(struct block_device *bdev,
     188             :                         sector_t bblock, unsigned blocksize);
     189             : int bh_uptodate_or_lock(struct buffer_head *bh);
     190             : int bh_submit_read(struct buffer_head *bh);
     191             : 
     192             : extern int buffer_heads_over_limit;
     193             : 
     194             : /*
     195             :  * Generic address_space_operations implementations for buffer_head-backed
     196             :  * address_spaces.
     197             :  */
     198             : void block_invalidatepage(struct page *page, unsigned long offset);
     199             : int block_write_full_page(struct page *page, get_block_t *get_block,
     200             :                                 struct writeback_control *wbc);
     201             : int block_write_full_page_endio(struct page *page, get_block_t *get_block,
     202             :                         struct writeback_control *wbc, bh_end_io_t *handler);
     203             : int block_read_full_page(struct page*, get_block_t*);
     204             : int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
     205             :                                 unsigned long from);
     206             : int block_write_begin(struct file *, struct address_space *,
     207             :                                 loff_t, unsigned, unsigned,
     208             :                                 struct page **, void **, get_block_t*);
     209             : int block_write_end(struct file *, struct address_space *,
     210             :                                 loff_t, unsigned, unsigned,
     211             :                                 struct page *, void *);
     212             : int generic_write_end(struct file *, struct address_space *,
     213             :                                 loff_t, unsigned, unsigned,
     214             :                                 struct page *, void *);
     215             : void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
     216             : int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
     217             : int cont_write_begin(struct file *, struct address_space *, loff_t,
     218             :                         unsigned, unsigned, struct page **, void **,
     219             :                         get_block_t *, loff_t *);
     220             : int generic_cont_expand_simple(struct inode *inode, loff_t size);
     221             : int block_commit_write(struct page *page, unsigned from, unsigned to);
     222             : int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
     223             :                                 get_block_t get_block);
     224             : void block_sync_page(struct page *);
     225             : sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
     226             : int block_truncate_page(struct address_space *, loff_t, get_block_t *);
     227             : int file_fsync(struct file *, struct dentry *, int);
     228             : int nobh_write_begin(struct file *, struct address_space *,
     229             :                                 loff_t, unsigned, unsigned,
     230             :                                 struct page **, void **, get_block_t*);
     231             : int nobh_write_end(struct file *, struct address_space *,
     232             :                                 loff_t, unsigned, unsigned,
     233             :                                 struct page *, void *);
     234             : int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
     235             : int nobh_writepage(struct page *page, get_block_t *get_block,
     236             :                         struct writeback_control *wbc);
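
A minimal sketch of how a simple filesystem might wire these generics into its address_space_operations (the myfs_* names are hypothetical; myfs_get_block is the filesystem's get_block_t callback, sketched further below after map_bh()):

    static int myfs_get_block(struct inode *, sector_t, struct buffer_head *, int);

    static int myfs_readpage(struct file *file, struct page *page)
    {
            return block_read_full_page(page, myfs_get_block);
    }

    static int myfs_writepage(struct page *page, struct writeback_control *wbc)
    {
            return block_write_full_page(page, myfs_get_block, wbc);
    }

    static int myfs_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
    {
            *pagep = NULL;
            return block_write_begin(file, mapping, pos, len, flags,
                                     pagep, fsdata, myfs_get_block);
    }

    static const struct address_space_operations myfs_aops = {
            .readpage       = myfs_readpage,
            .writepage      = myfs_writepage,
            .sync_page      = block_sync_page,
            .write_begin    = myfs_write_begin,
            .write_end      = generic_write_end,
    };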
     237             : 
     238             : void buffer_init(void);
     239             : 
     240             : /*
     241             :  * inline definitions
     242             :  */
     243             : 
     244             : static inline void attach_page_buffers(struct page *page,
     245             :                 struct buffer_head *head)
     246             : {
     247             :         page_cache_get(page);
     248             :         SetPagePrivate(page);
     249             :         set_page_private(page, (unsigned long)head);
     250             : }
     251             : 
     252             : static inline void get_bh(struct buffer_head *bh)
     253             : {
     254             :         atomic_inc(&bh->b_count);
     255             : }
     256             : 
     257             : static inline void put_bh(struct buffer_head *bh)
     258             : {
     259             :         smp_mb__before_atomic_dec();
     260             :         atomic_dec(&bh->b_count);
     261             : }
     262             : 
     263             : static inline void brelse(struct buffer_head *bh)
     264             : {
     265             :         if (bh)
     266             :                 __brelse(bh);
     267             : }
     268             : 
     269             : static inline void bforget(struct buffer_head *bh)
     270             : {
     271             :         if (bh)
     272             :                 __bforget(bh);
     273             : }
     274             : 
     275             : static inline struct buffer_head *
     276             : sb_bread(struct super_block *sb, sector_t block)
     277             : {
     278             :         return __bread(sb->s_bdev, block, sb->s_blocksize);
     279             : }
     280             : 
     281             : static inline void
     282             : sb_breadahead(struct super_block *sb, sector_t block)
     283             : {
     284             :         __breadahead(sb->s_bdev, block, sb->s_blocksize);
     285             : }
     286             : 
     287             : static inline struct buffer_head *
     288             : sb_getblk(struct super_block *sb, sector_t block)
     289             : {
     290             :         return __getblk(sb->s_bdev, block, sb->s_blocksize);
     291             : }
     292             : 
     293             : static inline struct buffer_head *
     294             : sb_find_get_block(struct super_block *sb, sector_t block)
     295             : {
     296             :         return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
     297             : }
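
A usage sketch for the sb_* wrappers (hypothetical helper name; assumes the usual kernel headers): read one metadata block at the superblock's block size and drop the reference when done.

    static int example_read_first_byte(struct super_block *sb, sector_t blkno, u8 *out)
    {
            struct buffer_head *bh;

            bh = sb_bread(sb, blkno);       /* returns NULL on I/O error */
            if (!bh)
                    return -EIO;
            *out = *(u8 *)bh->b_data;       /* b_data spans sb->s_blocksize bytes */
            brelse(bh);                     /* drop the reference taken by sb_bread() */
            return 0;
    }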
     298             : 
     299             : static inline void
     300             : map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
     301             : {
     302             :         set_buffer_mapped(bh);
     303             :         bh->b_bdev = sb->s_bdev;
     304             :         bh->b_blocknr = block;
     305             :         bh->b_size = sb->s_blocksize;
     306             : }
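
map_bh() is typically called from a filesystem's get_block_t callback to record the disk mapping it found or created. A hedged sketch, with myfs_find_block() and myfs_alloc_block() standing in for hypothetical filesystem helpers:

    static int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create)
    {
            sector_t phys = myfs_find_block(inode, iblock);     /* 0 means a hole */

            if (!phys) {
                    if (!create)
                            return 0;               /* hole: leave the buffer unmapped */
                    phys = myfs_alloc_block(inode, iblock);
                    if (!phys)
                            return -ENOSPC;
                    set_buffer_new(bh_result);      /* callers will zero the new block */
            }
            map_bh(bh_result, inode->i_sb, phys);   /* sets mapped, b_bdev, b_blocknr, b_size */
            return 0;
    }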
     307             : 
     308             : /*
     309             :  * Calling wait_on_buffer() for a zero-ref buffer is illegal, so we call into
      310             :  * __wait_on_buffer() just to trip a debug check, because debug code in inline
      311             :  * functions is bloaty.
     312             :  */
     313             : static inline void wait_on_buffer(struct buffer_head *bh)
     314             : {
     315             :         might_sleep();
     316             :         if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
     317             :                 __wait_on_buffer(bh);
     318             : }
     319             : 
     320             : static inline int trylock_buffer(struct buffer_head *bh)
     321             : {
     322             :         return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
     323             : }
     324             : 
     325             : static inline void lock_buffer(struct buffer_head *bh)
     326             : {
     327             :         might_sleep();
     328             :         if (!trylock_buffer(bh))
     329             :                 __lock_buffer(bh);
     330             : }
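
These locking helpers combine with submit_bh() and wait_on_buffer() in the classic synchronous-read pattern, essentially what bh_uptodate_or_lock() and bh_submit_read(), declared above, encapsulate. A sketch (hypothetical function name; the caller must already hold a reference on bh, since wait_on_buffer() on a zero-ref buffer is illegal):

    static int example_read_bh_sync(struct buffer_head *bh)
    {
            lock_buffer(bh);
            if (buffer_uptodate(bh)) {              /* data already valid */
                    unlock_buffer(bh);
                    return 0;
            }
            get_bh(bh);                             /* reference for the I/O completion */
            bh->b_end_io = end_buffer_read_sync;    /* unlocks bh and drops that reference */
            submit_bh(READ, bh);
            wait_on_buffer(bh);
            if (!buffer_uptodate(bh))
                    return -EIO;
            return 0;
    }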
     331             : 
     332             : extern int __set_page_dirty_buffers(struct page *page);
     333           1 : 
     334             : #else /* CONFIG_BLOCK */
     335             : 
     336             : static inline void buffer_init(void) {}
     337             : static inline int try_to_free_buffers(struct page *page) { return 1; }
     338             : static inline int inode_has_buffers(struct inode *inode) { return 0; }
     339             : static inline void invalidate_inode_buffers(struct inode *inode) {}
     340             : static inline int remove_inode_buffers(struct inode *inode) { return 1; }
     341             : static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
     342             : 
     343             : #endif /* CONFIG_BLOCK */
     344             : #endif /* _LINUX_BUFFER_HEAD_H */

Generated by: LCOV version 1.10