LCOV - code coverage report
Current view: top level - fs - mbcache.c (source / functions)
Test:         coverage.info
Date:         2017-01-25

                 Hit    Total    Coverage
Lines:            56      272      20.6 %
Functions:         7       19      36.8 %

          Line data    Source code
       1             : /*
       2             :  * linux/fs/mbcache.c
       3             :  * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
       4             :  */
       5             : 
       6             : /*
       7             :  * Filesystem Meta Information Block Cache (mbcache)
       8             :  *
       9             :  * The mbcache caches blocks of block devices that need to be located
      10             :  * by their device/block number, as well as by other criteria (such
      11             :  * as the block's contents).
      12             :  *
      13             :  * There can only be one cache entry in a cache per device and block number.
      14             :  * Additional indexes need not be unique in this sense. The number of
      15             :  * additional indexes (=other criteria) can be hardwired at compile time
      16             :  * or specified at cache create time.
      17             :  *
      18             :  * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
      19             :  * in the cache. A valid entry is in the main hash tables of the cache,
      20             :  * and may also be in the lru list. An invalid entry is not in any hashes
      21             :  * or lists.
      22             :  *
      23             :  * A valid cache entry is only in the lru list if no handles refer to it.
      24             :  * Invalid cache entries will be freed when the last handle to the cache
      25             :  * entry is released. Entries that cannot be freed immediately are put
      26             :  * back on the lru list.
      27             :  */
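
As a hedged illustration of the valid/invalid states and handle rules described above, here is a minimal walk through the entry life cycle using the functions exported below (the wrapper name is hypothetical and error handling is omitted):

    #include <linux/fs.h>
    #include <linux/mbcache.h>

    /* Hypothetical sketch: one pass through the entry states. */
    static void mbcache_lifecycle_sketch(struct mb_cache *cache,
                                         struct block_device *bdev,
                                         sector_t block, unsigned int keys[])
    {
            struct mb_cache_entry *ce;

            ce = mb_cache_entry_alloc(cache, GFP_KERNEL); /* invalid, one handle      */
            mb_cache_entry_insert(ce, bdev, block, keys); /* valid, held, not on lru  */
            mb_cache_entry_release(ce);                   /* last handle dropped:     */
                                                          /* valid entry on lru list  */
            ce = mb_cache_entry_get(cache, bdev, block);  /* off the lru, locked for  */
                                                          /* exclusive access         */
            mb_cache_entry_free(ce);                      /* unhashed (invalid) and   */
                                                          /* freed with the handle    */
    }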
      28             : 
      29             : #include <linux/kernel.h>
      30             : #include <linux/module.h>
      31             : 
      32             : #include <linux/hash.h>
      33             : #include <linux/fs.h>
      34             : #include <linux/mm.h>
      35             : #include <linux/slab.h>
      36             : #include <linux/sched.h>
      37             : #include <linux/init.h>
      38             : #include <linux/mbcache.h>
      39             : 
      40             : 
      41             : #ifdef MB_CACHE_DEBUG
      42             : # define mb_debug(f...) do { \
      43             :                 printk(KERN_DEBUG f); \
      44             :                 printk("\n"); \
      45             :         } while (0)
      46             : #define mb_assert(c) do { if (!(c)) \
      47             :                 printk(KERN_ERR "assertion " #c " failed\n"); \
      48             :         } while(0)
      49             : #else
      50             : # define mb_debug(f...) do { } while(0)
      51             : # define mb_assert(c) do { } while(0)
      52             : #endif
      53             : #define mb_error(f...) do { \
      54             :                 printk(KERN_ERR f); \
      55             :                 printk("\n"); \
      56             :         } while(0)
      57             : 
      58             : #define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
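
MB_CACHE_WRITER evaluates to 0x7fff, and e_used encodes "readers plus at most one writer": each reader handle adds 1, a writer handle adds MB_CACHE_WRITER + 1, and e_used >= MB_CACHE_WRITER therefore means a writer currently holds the entry. A short worked example (explanatory arithmetic only, mirroring the alloc/get/find/release paths below):

    unsigned short e_used = 0;      /* no handles                               */
    e_used += 1 + 0x7fff;           /* writer takes the entry: e_used == 0x8000 */
                                    /* readers now sleep on mb_cache_queue      */
    e_used -= 0x7fff; e_used--;     /* writer releases: e_used == 0             */
    e_used++;                       /* first reader: 1                          */
    e_used++;                       /* second reader: 2 (< MB_CACHE_WRITER,     */
                                    /* so further readers may still enter)      */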
      59             : 
      60           1 : static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
      61           1 :                 
      62             : MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
      63             : MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
      64             : MODULE_LICENSE("GPL");
      65             : 
      66             : EXPORT_SYMBOL(mb_cache_create);
      67             : EXPORT_SYMBOL(mb_cache_shrink);
      68             : EXPORT_SYMBOL(mb_cache_destroy);
      69             : EXPORT_SYMBOL(mb_cache_entry_alloc);
      70             : EXPORT_SYMBOL(mb_cache_entry_insert);
      71             : EXPORT_SYMBOL(mb_cache_entry_release);
      72             : EXPORT_SYMBOL(mb_cache_entry_free);
      73             : EXPORT_SYMBOL(mb_cache_entry_get);
      74             : #if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
      75             : EXPORT_SYMBOL(mb_cache_entry_find_first);
      76             : EXPORT_SYMBOL(mb_cache_entry_find_next);
      77             : #endif
      78             : 
      79             : struct mb_cache {
      80             :         struct list_head                c_cache_list;
      81             :         const char                      *c_name;
      82             :         struct mb_cache_op              c_op;
      83             :         atomic_t                        c_entry_count;
      84             :         int                             c_bucket_bits;
      85             : #ifndef MB_CACHE_INDEXES_COUNT
      86             :         int                             c_indexes_count;
      87             : #endif
      88             :         struct kmem_cache                       *c_entry_cache;
      89             :         struct list_head                *c_block_hash;
      90             :         struct list_head                *c_indexes_hash[0];
      91             : };
      92             : 
      93             : 
      94             : /*
      95             :  * Global data: list of all mbcache's, lru list, and a spinlock for
      96             :  * accessing cache data structures on SMP machines. The lru list is
      97             :  * global across all mbcaches.
      98             :  */
      99             : 
     100           1 : static LIST_HEAD(mb_cache_list);
     101           1 : static LIST_HEAD(mb_cache_lru_list);
     102           1 : static DEFINE_SPINLOCK(mb_cache_spinlock);
     103             : 
     104             : static inline int
     105             : mb_cache_indexes(struct mb_cache *cache)
     106             : {
     107             : #ifdef MB_CACHE_INDEXES_COUNT
     108           1 :         return MB_CACHE_INDEXES_COUNT;
     109             : #else
     110             :         return cache->c_indexes_count;
     111             : #endif
     112             : }
     113             : 
     114             : /*
      115             :  * The shrinker that the mbcache registers so it can be shrunk dynamically.
     116             :  */
     117             : 
     118             : static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
     119             : 
     120           1 : static struct shrinker mb_cache_shrinker = {
     121             :         .shrink = mb_cache_shrink_fn,
     122             :         .seeks = DEFAULT_SEEKS,
     123             : };
     124             : 
     125             : static inline int
     126             : __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
     127             : {
     128           4 :         return !list_empty(&ce->e_block_list);
     129             : }
     130             : 
     131             : 
     132             : static void
     133             : __mb_cache_entry_unhash(struct mb_cache_entry *ce)
     134             : {
     135           1 :         int n;
     136           1 : 
     137           5 :         if (__mb_cache_entry_is_hashed(ce)) {
     138           2 :                 list_del_init(&ce->e_block_list);
     139           7 :                 for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
     140           3 :                         list_del(&ce->e_indexes[n].o_list);
     141           2 :         }
     142             : }
     143             : 
     144             : 
     145             : static void
     146             : __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
     147           2 : {
     148           2 :         struct mb_cache *cache = ce->e_cache;
     149           1 : 
     150             :         mb_assert(!(ce->e_used || ce->e_queued));
     151           5 :         if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
     152             :                 /* free failed -- put back on the lru list
     153             :                    for freeing later. */
     154           2 :                 spin_lock(&mb_cache_spinlock);
     155           2 :                 list_add(&ce->e_lru_list, &mb_cache_lru_list);
     156           2 :                 spin_unlock(&mb_cache_spinlock);
     157             :         } else {
     158           2 :                 kmem_cache_free(cache->c_entry_cache, ce);
     159           4 :                 atomic_dec(&cache->c_entry_count);
     160             :         }
     161           3 : }
     162             : 
     163             : 
     164             : static void
     165             : __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
     166             :         __releases(mb_cache_spinlock)
     167           0 : {
     168             :         /* Wake up all processes queuing for this cache entry. */
     169           0 :         if (ce->e_queued)
     170           0 :                 wake_up_all(&mb_cache_queue);
     171           0 :         if (ce->e_used >= MB_CACHE_WRITER)
     172           0 :                 ce->e_used -= MB_CACHE_WRITER;
     173           0 :         ce->e_used--;
     174           0 :         if (!(ce->e_used || ce->e_queued)) {
     175           0 :                 if (!__mb_cache_entry_is_hashed(ce))
     176           0 :                         goto forget;
     177             :                 mb_assert(list_empty(&ce->e_lru_list));
     178           0 :                 list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
     179             :         }
     180           0 :         spin_unlock(&mb_cache_spinlock);
     181           0 :         return;
     182           0 : forget:
     183           0 :         spin_unlock(&mb_cache_spinlock);
     184           0 :         __mb_cache_entry_forget(ce, GFP_KERNEL);
     185           0 : }
     186             : 
     187             : 
     188             : /*
     189             :  * mb_cache_shrink_fn()  memory pressure callback
     190             :  *
     191             :  * This function is called by the kernel memory management when memory
     192             :  * gets low.
     193             :  *
     194             :  * @nr_to_scan: Number of objects to scan
     195             :  * @gfp_mask: (ignored)
     196             :  *
      197             :  * Returns the number of entries in all caches, scaled by sysctl_vfs_cache_pressure / 100.
     198             :  */
     199             : static int
     200             : mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
     201             : {
     202           3 :         LIST_HEAD(free_list);
     203           1 :         struct list_head *l, *ltmp;
     204           2 :         int count = 0;
     205           1 : 
     206           3 :         spin_lock(&mb_cache_spinlock);
     207           8 :         list_for_each(l, &mb_cache_list) {
     208           2 :                 struct mb_cache *cache =
     209           5 :                         list_entry(l, struct mb_cache, c_cache_list);
     210           1 :                 mb_debug("cache %s (%d)", cache->c_name,
     211           1 :                           atomic_read(&cache->c_entry_count));
     212           4 :                 count += atomic_read(&cache->c_entry_count);
     213           1 :         }
     214             :         mb_debug("trying to free %d entries", nr_to_scan);
     215           2 :         if (nr_to_scan == 0) {
     216           2 :                 spin_unlock(&mb_cache_spinlock);
     217           1 :                 goto out;
     218             :         }
     219           9 :         while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
     220           1 :                 struct mb_cache_entry *ce =
     221           3 :                         list_entry(mb_cache_lru_list.next,
     222             :                                    struct mb_cache_entry, e_lru_list);
     223           2 :                 list_move_tail(&ce->e_lru_list, &free_list);
     224           4 :                 __mb_cache_entry_unhash(ce);
     225             :         }
     226           5 :         spin_unlock(&mb_cache_spinlock);
     227           7 :         list_for_each_safe(l, ltmp, &free_list) {
     228           6 :                 __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
     229           3 :                                                    e_lru_list), gfp_mask);
     230             :         }
     231             : out:
     232           2 :         return (count / 100) * sysctl_vfs_cache_pressure;
     233             : }
     234           1 : 
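
To put the return value in concrete terms: with, say, 300 entries across all mbcaches and the default sysctl_vfs_cache_pressure of 100, the callback reports (300 / 100) * 100 = 300 objects; raising vfs_cache_pressure above 100 makes the mbcache look proportionally larger, and therefore more attractive, to the shrinker.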
     235             : 
     236             : /*
     237             :  * mb_cache_create()  create a new cache
     238             :  *
      239             :  * All entries in one cache are the same size. Cache entries may be from
      240             :  * multiple devices. (The shrinker that trims all mbcaches is registered
      241             :  * once, at module init time in init_mbcache().) Returns NULL if no more
      242             :  * memory was available.
     243             :  *
     244             :  * @name: name of the cache (informal)
     245             :  * @cache_op: contains the callback called when freeing a cache entry
     246             :  * @entry_size: The size of a cache entry, including
     247             :  *              struct mb_cache_entry
     248             :  * @indexes_count: number of additional indexes in the cache. Must equal
     249             :  *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
     250             :  *                 hardwired.
     251             :  * @bucket_bits: log2(number of hash buckets)
     252             :  */
     253             : struct mb_cache *
     254             : mb_cache_create(const char *name, struct mb_cache_op *cache_op,
     255             :                 size_t entry_size, int indexes_count, int bucket_bits)
     256             : {
     257           0 :         int m=0, n, bucket_count = 1 << bucket_bits;
     258           0 :         struct mb_cache *cache = NULL;
     259           0 : 
     260           0 :         if(entry_size < sizeof(struct mb_cache_entry) +
     261           0 :            indexes_count * sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]))
     262           0 :                 return NULL;
     263           0 : 
     264           0 :         cache = kmalloc(sizeof(struct mb_cache) +
     265             :                         indexes_count * sizeof(struct list_head), GFP_KERNEL);
     266           0 :         if (!cache)
     267           0 :                 goto fail;
     268           0 :         cache->c_name = name;
     269           0 :         cache->c_op.free = NULL;
     270           0 :         if (cache_op)
     271           0 :                 cache->c_op.free = cache_op->free;
     272           0 :         atomic_set(&cache->c_entry_count, 0);
     273           0 :         cache->c_bucket_bits = bucket_bits;
     274             : #ifdef MB_CACHE_INDEXES_COUNT
     275             :         mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
     276             : #else
     277             :         cache->c_indexes_count = indexes_count;
     278             : #endif
     279           0 :         cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
     280             :                                       GFP_KERNEL);
     281           0 :         if (!cache->c_block_hash)
     282           0 :                 goto fail;
     283           0 :         for (n=0; n<bucket_count; n++)
     284           0 :                 INIT_LIST_HEAD(&cache->c_block_hash[n]);
     285           0 :         for (m=0; m<indexes_count; m++) {
     286           0 :                 cache->c_indexes_hash[m] = kmalloc(bucket_count *
     287           0 :                                                  sizeof(struct list_head),
     288             :                                                  GFP_KERNEL);
     289           0 :                 if (!cache->c_indexes_hash[m])
     290           0 :                         goto fail;
     291           0 :                 for (n=0; n<bucket_count; n++)
     292           0 :                         INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
     293           0 :         }
     294           0 :         cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
     295             :                 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
     296           0 :         if (!cache->c_entry_cache)
     297           0 :                 goto fail;
     298             : 
     299           0 :         spin_lock(&mb_cache_spinlock);
     300           0 :         list_add(&cache->c_cache_list, &mb_cache_list);
     301           0 :         spin_unlock(&mb_cache_spinlock);
     302           0 :         return cache;
     303           0 : 
     304             : fail:
     305           0 :         if (cache) {
     306           0 :                 while (--m >= 0)
     307           0 :                         kfree(cache->c_indexes_hash[m]);
     308           0 :                 kfree(cache->c_block_hash);
     309           0 :                 kfree(cache);
     310             :         }
     311           0 :         return NULL;
     312             : }
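
A hedged sketch of a caller setting up a cache with one additional index; the cache and function names are hypothetical, and the entry_size expression simply mirrors the size check at the top of mb_cache_create():

    #include <linux/errno.h>
    #include <linux/mbcache.h>

    static struct mb_cache *my_xattr_cache;         /* hypothetical */

    static int my_cache_setup(void)
    {
            /* no free callback, 1 extra index, 2^6 = 64 buckets per hash */
            my_xattr_cache = mb_cache_create("my_xattr", NULL,
                            sizeof(struct mb_cache_entry) +
                            sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]),
                            1, 6);
            return my_xattr_cache ? 0 : -ENOMEM;
    }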
     313             : 
     314             : 
     315             : /*
     316             :  * mb_cache_shrink()
     317             :  *
      318             :  * Removes all cache entries of a device from the cache. Entries that are
      319             :  * currently in use cannot be freed and thus remain in the cache; all others
      320             :  * are freed.
     321             :  *
     322             :  * @bdev: which device's cache entries to shrink
     323             :  */
     324             : void
     325             : mb_cache_shrink(struct block_device *bdev)
     326             : {
     327           0 :         LIST_HEAD(free_list);
     328           0 :         struct list_head *l, *ltmp;
     329           0 : 
     330           0 :         spin_lock(&mb_cache_spinlock);
     331           0 :         list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
     332           0 :                 struct mb_cache_entry *ce =
     333           0 :                         list_entry(l, struct mb_cache_entry, e_lru_list);
     334           0 :                 if (ce->e_bdev == bdev) {
     335           0 :                         list_move_tail(&ce->e_lru_list, &free_list);
     336           0 :                         __mb_cache_entry_unhash(ce);
     337             :                 }
     338             :         }
     339           0 :         spin_unlock(&mb_cache_spinlock);
     340           0 :         list_for_each_safe(l, ltmp, &free_list) {
     341           0 :                 __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
     342           0 :                                                    e_lru_list), GFP_KERNEL);
     343             :         }
     344             : }
     345             : 
     346             : 
     347           0 : /*
     348             :  * mb_cache_destroy()
     349             :  *
     350             :  * Shrinks the cache to its minimum possible size (hopefully 0 entries),
     351             :  * and then destroys it. If this was the last mbcache, un-registers the
     352             :  * mbcache from kernel memory management.
     353             :  */
     354             : void
     355             : mb_cache_destroy(struct mb_cache *cache)
     356             : {
     357           0 :         LIST_HEAD(free_list);
     358           0 :         struct list_head *l, *ltmp;
     359           0 :         int n;
     360           0 : 
     361           0 :         spin_lock(&mb_cache_spinlock);
     362           0 :         list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
     363           0 :                 struct mb_cache_entry *ce =
     364           0 :                         list_entry(l, struct mb_cache_entry, e_lru_list);
     365           0 :                 if (ce->e_cache == cache) {
     366           0 :                         list_move_tail(&ce->e_lru_list, &free_list);
     367           0 :                         __mb_cache_entry_unhash(ce);
     368             :                 }
     369             :         }
     370           0 :         list_del(&cache->c_cache_list);
     371           0 :         spin_unlock(&mb_cache_spinlock);
     372             : 
     373           0 :         list_for_each_safe(l, ltmp, &free_list) {
     374           0 :                 __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
     375           0 :                                                    e_lru_list), GFP_KERNEL);
     376             :         }
     377             : 
     378           0 :         if (atomic_read(&cache->c_entry_count) > 0) {
     379           0 :                 mb_error("cache %s: %d orphaned entries",
     380             :                           cache->c_name,
     381             :                           atomic_read(&cache->c_entry_count));
     382             :         }
     383             : 
     384           0 :         kmem_cache_destroy(cache->c_entry_cache);
     385             : 
     386           0 :         for (n=0; n < mb_cache_indexes(cache); n++)
     387           0 :                 kfree(cache->c_indexes_hash[n]);
     388           0 :         kfree(cache->c_block_hash);
     389           0 :         kfree(cache);
     390           0 : }
     391             : 
     392             : 
     393             : /*
     394             :  * mb_cache_entry_alloc()
     395             :  *
     396             :  * Allocates a new cache entry. The new entry will not be valid initially,
     397             :  * and thus cannot be looked up yet. It should be filled with data, and
     398             :  * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
     399             :  * if no more memory was available.
     400             :  */
     401             : struct mb_cache_entry *
     402             : mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
     403             : {
     404           0 :         struct mb_cache_entry *ce;
     405           0 : 
     406           0 :         ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
     407           0 :         if (ce) {
     408           0 :                 atomic_inc(&cache->c_entry_count);
     409           0 :                 INIT_LIST_HEAD(&ce->e_lru_list);
     410           0 :                 INIT_LIST_HEAD(&ce->e_block_list);
     411           0 :                 ce->e_cache = cache;
     412           0 :                 ce->e_used = 1 + MB_CACHE_WRITER;
     413           0 :                 ce->e_queued = 0;
     414             :         }
     415           0 :         return ce;
     416             : }
     417             : 
     418             : 
     419             : /*
     420             :  * mb_cache_entry_insert()
     421             :  *
     422             :  * Inserts an entry that was allocated using mb_cache_entry_alloc() into
     423             :  * the cache. After this, the cache entry can be looked up, but is not yet
     424             :  * in the lru list as the caller still holds a handle to it. Returns 0 on
      425             :  * success, or -EBUSY if a cache entry for that device + block exists
      426             :  * already (this may happen after a failed lookup, when another process
      427             :  * has inserted the same cache entry in the meantime).
     428             :  *
     429             :  * @bdev: device the cache entry belongs to
     430             :  * @block: block number
     431             :  * @keys: array of additional keys. There must be indexes_count entries
     432             :  *        in the array (as specified when creating the cache).
     433             :  */
     434             : int
     435             : mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
     436             :                       sector_t block, unsigned int keys[])
     437             : {
     438           0 :         struct mb_cache *cache = ce->e_cache;
     439           0 :         unsigned int bucket;
     440           0 :         struct list_head *l;
     441           0 :         int error = -EBUSY, n;
     442           0 : 
     443           0 :         bucket = hash_long((unsigned long)bdev + (block & 0xffffffff), 
     444           0 :                            cache->c_bucket_bits);
     445           0 :         spin_lock(&mb_cache_spinlock);
     446           0 :         list_for_each_prev(l, &cache->c_block_hash[bucket]) {
     447           0 :                 struct mb_cache_entry *ce =
     448           0 :                         list_entry(l, struct mb_cache_entry, e_block_list);
     449           0 :                 if (ce->e_bdev == bdev && ce->e_block == block)
     450           0 :                         goto out;
     451             :         }
     452           0 :         __mb_cache_entry_unhash(ce);
     453           0 :         ce->e_bdev = bdev;
     454           0 :         ce->e_block = block;
     455           0 :         list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
     456           0 :         for (n=0; n<mb_cache_indexes(cache); n++) {
     457           0 :                 ce->e_indexes[n].o_key = keys[n];
     458           0 :                 bucket = hash_long(keys[n], cache->c_bucket_bits);
     459           0 :                 list_add(&ce->e_indexes[n].o_list,
     460             :                          &cache->c_indexes_hash[n][bucket]);
     461             :         }
     462           0 :         error = 0;
     463           0 : out:
     464           0 :         spin_unlock(&mb_cache_spinlock);
     465           0 :         return error;
     466             : }
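
A hedged sketch of the insert path for a cache with one additional index (hypothetical helper; the buffer_head supplies the device and block number). -EBUSY just means another process inserted the same block first, so the freshly allocated entry is simply dropped again:

    #include <linux/buffer_head.h>
    #include <linux/mbcache.h>

    static int my_cache_insert(struct mb_cache *cache, struct buffer_head *bh,
                               unsigned int hash)
    {
            struct mb_cache_entry *ce;
            int error;

            ce = mb_cache_entry_alloc(cache, GFP_NOFS);
            if (!ce)
                    return -ENOMEM;
            error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
            if (error) {
                    mb_cache_entry_free(ce);        /* never became valid   */
                    if (error == -EBUSY)
                            error = 0;              /* duplicate: not fatal */
            } else {
                    mb_cache_entry_release(ce);     /* valid; drop handle   */
            }
            return error;
    }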
     467             : 
     468             : 
     469             : /*
     470             :  * mb_cache_entry_release()
     471             :  *
     472             :  * Release a handle to a cache entry. When the last handle to a cache entry
     473             :  * is released it is either freed (if it is invalid) or otherwise inserted
     474             :  * in to the lru list.
     475             :  */
     476             : void
     477             : mb_cache_entry_release(struct mb_cache_entry *ce)
     478             : {
     479           0 :         spin_lock(&mb_cache_spinlock);
     480           0 :         __mb_cache_entry_release_unlock(ce);
     481           0 : }
     482             : 
     483             : 
     484             : /*
     485             :  * mb_cache_entry_free()
     486             :  *
      487             :  * Unhashes the cache entry (making it invalid) and releases the handle,
      488             :  * so the entry is freed once the last handle to it has been released.
     489             :  */
     490             : void
     491             : mb_cache_entry_free(struct mb_cache_entry *ce)
     492             : {
     493           0 :         spin_lock(&mb_cache_spinlock);
     494             :         mb_assert(list_empty(&ce->e_lru_list));
     495           0 :         __mb_cache_entry_unhash(ce);
     496           0 :         __mb_cache_entry_release_unlock(ce);
     497           0 : }
     498             : 
     499             : 
     500             : /*
     501             :  * mb_cache_entry_get()
     502             :  *
      503             :  * Get a cache entry by device / block number. (There can only be one entry
     504             :  * in the cache per device and block.) Returns NULL if no such cache entry
     505             :  * exists. The returned cache entry is locked for exclusive access ("single
     506             :  * writer").
     507             :  */
     508             : struct mb_cache_entry *
     509             : mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
     510             :                    sector_t block)
     511             : {
     512           0 :         unsigned int bucket;
     513           0 :         struct list_head *l;
     514           0 :         struct mb_cache_entry *ce;
     515           0 : 
     516           0 :         bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
     517           0 :                            cache->c_bucket_bits);
     518           0 :         spin_lock(&mb_cache_spinlock);
     519           0 :         list_for_each(l, &cache->c_block_hash[bucket]) {
     520           0 :                 ce = list_entry(l, struct mb_cache_entry, e_block_list);
     521           0 :                 if (ce->e_bdev == bdev && ce->e_block == block) {
     522           0 :                         DEFINE_WAIT(wait);
     523             : 
     524           0 :                         if (!list_empty(&ce->e_lru_list))
     525           0 :                                 list_del_init(&ce->e_lru_list);
     526             : 
     527           0 :                         while (ce->e_used > 0) {
     528           0 :                                 ce->e_queued++;
     529           0 :                                 prepare_to_wait(&mb_cache_queue, &wait,
     530             :                                                 TASK_UNINTERRUPTIBLE);
     531           0 :                                 spin_unlock(&mb_cache_spinlock);
     532           0 :                                 schedule();
     533           0 :                                 spin_lock(&mb_cache_spinlock);
     534           0 :                                 ce->e_queued--;
     535             :                         }
     536           0 :                         finish_wait(&mb_cache_queue, &wait);
     537           0 :                         ce->e_used += 1 + MB_CACHE_WRITER;
     538             : 
     539           0 :                         if (!__mb_cache_entry_is_hashed(ce)) {
     540           0 :                                 __mb_cache_entry_release_unlock(ce);
     541           0 :                                 return NULL;
     542             :                         }
     543           0 :                         goto cleanup;
     544             :                 }
     545             :         }
     546           0 :         ce = NULL;
     547           0 : 
     548             : cleanup:
     549           0 :         spin_unlock(&mb_cache_spinlock);
     550           0 :         return ce;
     551             : }
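
A hedged example of the exclusive lookup (hypothetical helper): the entry, if found, comes back locked for writing, so the caller must either release it again or, as here, invalidate it because the block is being reused:

    static void my_cache_forget_block(struct mb_cache *cache,
                                      struct block_device *bdev,
                                      sector_t block)
    {
            struct mb_cache_entry *ce;

            ce = mb_cache_entry_get(cache, bdev, block);    /* NULL if not cached */
            if (ce)
                    mb_cache_entry_free(ce);    /* unhash and drop the handle */
    }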
     552             : 
     553             : #if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
     554             : 
     555             : static struct mb_cache_entry *
     556             : __mb_cache_entry_find(struct list_head *l, struct list_head *head,
     557             :                       int index, struct block_device *bdev, unsigned int key)
     558             : {
     559           0 :         while (l != head) {
     560           0 :                 struct mb_cache_entry *ce =
     561           0 :                         list_entry(l, struct mb_cache_entry,
     562           0 :                                    e_indexes[index].o_list);
     563           0 :                 if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
     564           0 :                         DEFINE_WAIT(wait);
     565           0 : 
     566           0 :                         if (!list_empty(&ce->e_lru_list))
     567           0 :                                 list_del_init(&ce->e_lru_list);
     568             : 
     569             :                         /* Incrementing before holding the lock gives readers
     570             :                            priority over writers. */
     571           0 :                         ce->e_used++;
     572           0 :                         while (ce->e_used >= MB_CACHE_WRITER) {
     573           0 :                                 ce->e_queued++;
     574           0 :                                 prepare_to_wait(&mb_cache_queue, &wait,
     575             :                                                 TASK_UNINTERRUPTIBLE);
     576           0 :                                 spin_unlock(&mb_cache_spinlock);
     577           0 :                                 schedule();
     578           0 :                                 spin_lock(&mb_cache_spinlock);
     579           0 :                                 ce->e_queued--;
     580             :                         }
     581           0 :                         finish_wait(&mb_cache_queue, &wait);
     582             : 
     583           0 :                         if (!__mb_cache_entry_is_hashed(ce)) {
     584           0 :                                 __mb_cache_entry_release_unlock(ce);
     585           0 :                                 spin_lock(&mb_cache_spinlock);
     586           0 :                                 return ERR_PTR(-EAGAIN);
     587             :                         }
     588           0 :                         return ce;
     589             :                 }
     590           0 :                 l = l->next;
     591           0 :         }
     592           0 :         return NULL;
     593             : }
     594             : 
     595             : 
     596             : /*
     597             :  * mb_cache_entry_find_first()
     598             :  *
     599             :  * Find the first cache entry on a given device with a certain key in
      600             :  * an additional index. Additional matches can be found with
     601             :  * mb_cache_entry_find_next(). Returns NULL if no match was found. The
     602             :  * returned cache entry is locked for shared access ("multiple readers").
     603             :  *
     604             :  * @cache: the cache to search
      605             :  * @index: the number of the additional index to search (0<=index<indexes_count)
     606             :  * @bdev: the device the cache entry should belong to
     607             :  * @key: the key in the index
     608             :  */
     609             : struct mb_cache_entry *
     610             : mb_cache_entry_find_first(struct mb_cache *cache, int index,
     611             :                           struct block_device *bdev, unsigned int key)
     612             : {
     613           0 :         unsigned int bucket = hash_long(key, cache->c_bucket_bits);
     614           0 :         struct list_head *l;
     615           0 :         struct mb_cache_entry *ce;
     616           0 : 
     617             :         mb_assert(index < mb_cache_indexes(cache));
     618           0 :         spin_lock(&mb_cache_spinlock);
     619           0 :         l = cache->c_indexes_hash[index][bucket].next;
     620           0 :         ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
     621             :                                    index, bdev, key);
     622           0 :         spin_unlock(&mb_cache_spinlock);
     623           0 :         return ce;
     624             : }
     625             : 
     626             : 
     627             : /*
     628             :  * mb_cache_entry_find_next()
     629             :  *
     630             :  * Find the next cache entry on a given device with a certain key in an
     631             :  * additional index. Returns NULL if no match could be found. The previous
      632             :  * entry is automatically released, so that mb_cache_entry_find_next() can
     633             :  * be called like this:
     634             :  *
     635             :  * entry = mb_cache_entry_find_first();
     636             :  * while (entry) {
     637             :  *      ...
     638             :  *      entry = mb_cache_entry_find_next(entry, ...);
     639             :  * }
     640             :  *
     641             :  * @prev: The previous match
      642             :  * @index: the number of the additional index to search (0<=index<indexes_count)
     643             :  * @bdev: the device the cache entry should belong to
     644             :  * @key: the key in the index
     645             :  */
     646             : struct mb_cache_entry *
     647             : mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
     648             :                          struct block_device *bdev, unsigned int key)
     649             : {
     650           0 :         struct mb_cache *cache = prev->e_cache;
     651           0 :         unsigned int bucket = hash_long(key, cache->c_bucket_bits);
     652           0 :         struct list_head *l;
     653           0 :         struct mb_cache_entry *ce;
     654           0 : 
     655             :         mb_assert(index < mb_cache_indexes(cache));
     656           0 :         spin_lock(&mb_cache_spinlock);
     657           0 :         l = prev->e_indexes[index].o_list.next;
     658           0 :         ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
     659             :                                    index, bdev, key);
     660           0 :         __mb_cache_entry_release_unlock(prev);
     661           0 :         return ce;
     662             : }
     663             : 
     664             : #endif  /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */
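
Expanding the loop from the comment above into a hedged, self-contained sketch (hypothetical helper, single additional index 0): because __mb_cache_entry_find() can hand back ERR_PTR(-EAGAIN) when a candidate entry disappears while the caller slept, the result of both find functions should be checked with IS_ERR():

    #include <linux/err.h>
    #include <linux/mbcache.h>

    static void my_cache_scan(struct mb_cache *cache,
                              struct block_device *bdev, unsigned int key)
    {
            struct mb_cache_entry *ce;

            ce = mb_cache_entry_find_first(cache, 0, bdev, key);
            while (ce) {
                    if (IS_ERR(ce)) {
                            /* -EAGAIN: the entry vanished under us; a real
                             * caller would typically restart the search. */
                            break;
                    }
                    /* ... examine ce (held for shared, "reader" access) ... */
                    ce = mb_cache_entry_find_next(ce, 0, bdev, key);
            }
    }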
     665             : 
     666             : static int __init init_mbcache(void)
     667             : {
     668           1 :         register_shrinker(&mb_cache_shrinker);
     669           1 :         return 0;
     670             : }
     671             : 
     672             : static void __exit exit_mbcache(void)
     673             : {
     674           2 :         unregister_shrinker(&mb_cache_shrinker);
     675           2 : }
     676             : 
     677             : module_init(init_mbcache)
     678             : module_exit(exit_mbcache)
     679           1 : 

Generated by: LCOV version 1.10