struct list_head c_cache_list;
const char *c_name;
atomic_t c_entry_count;
+ int c_max_entries;
int c_bucket_bits;
struct kmem_cache *c_entry_cache;
struct list_head *c_block_hash;
mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
LIST_HEAD(free_list);
- struct list_head *l, *ltmp;
+ struct mb_cache *cache;
+ struct mb_cache_entry *entry, *tmp;
int count = 0;
- spin_lock(&mb_cache_spinlock);
- list_for_each(l, &mb_cache_list) {
- struct mb_cache *cache =
- list_entry(l, struct mb_cache, c_cache_list);
- mb_debug("cache %s (%d)", cache->c_name,
- atomic_read(&cache->c_entry_count));
- count += atomic_read(&cache->c_entry_count);
- }
mb_debug("trying to free %d entries", nr_to_scan);
- if (nr_to_scan == 0) {
- spin_unlock(&mb_cache_spinlock);
- goto out;
- }
+ spin_lock(&mb_cache_spinlock);
while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
struct mb_cache_entry *ce =
list_entry(mb_cache_lru_list.next,
struct mb_cache_entry, e_lru_list);
list_move_tail(&ce->e_lru_list, &free_list);
__mb_cache_entry_unhash(ce);
}
+ list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
+ mb_debug("cache %s (%d)", cache->c_name,
+ atomic_read(&cache->c_entry_count));
+ count += atomic_read(&cache->c_entry_count);
+ }
spin_unlock(&mb_cache_spinlock);
- list_for_each_safe(l, ltmp, &free_list) {
- __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
- e_lru_list), gfp_mask);
+ list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+ __mb_cache_entry_forget(entry, gfp_mask);
}
-out:
return (count / 100) * sysctl_vfs_cache_pressure;
}
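In the reworked shrinker, the debug/count loop runs after the LRU scan, inside the same mb_cache_spinlock critical section, so the nr_to_scan == 0 early exit and the out: label are no longer needed: entries taken from the head of mb_cache_lru_list are unhashed and collected on a local free_list under the lock, and __mb_cache_entry_forget() is called on them only after the lock has been dropped.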
if (!cache->c_entry_cache)
goto fail2;
+ /*
+ * Set an upper limit on the number of cache entries so that the hash
+ * chains won't grow too long.
+ */
+ cache->c_max_entries = bucket_count << 4;
+
spin_lock(&mb_cache_spinlock);
list_add(&cache->c_cache_list, &mb_cache_list);
spin_unlock(&mb_cache_spinlock);
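Assuming bucket_count in mb_cache_create() is 1 << bucket_bits (which the c_bucket_bits field suggests), the new limit of bucket_count << 4 caps the total number of entries at 16 per hash bucket on average; for example, bucket_bits = 10 gives 1024 buckets and at most 16384 cache entries.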
kfree(cache);
}
-
/*
* mb_cache_entry_alloc()
*
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
- struct mb_cache_entry *ce;
-
- ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
- if (ce) {
+ struct mb_cache_entry *ce = NULL;
+
+ if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
+ spin_lock(&mb_cache_spinlock);
+ if (!list_empty(&mb_cache_lru_list)) {
+ ce = list_entry(mb_cache_lru_list.next,
+ struct mb_cache_entry, e_lru_list);
+ list_del_init(&ce->e_lru_list);
+ __mb_cache_entry_unhash(ce);
+ }
+ spin_unlock(&mb_cache_spinlock);
+ }
+ if (!ce) {
+ ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
+ if (!ce)
+ return NULL;
atomic_inc(&cache->c_entry_count);
INIT_LIST_HEAD(&ce->e_lru_list);
INIT_LIST_HEAD(&ce->e_block_list);
ce->e_cache = cache;
- ce->e_used = 1 + MB_CACHE_WRITER;
ce->e_queued = 0;
}
+ ce->e_used = 1 + MB_CACHE_WRITER;
return ce;
}
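As a rough illustration of the reuse-at-limit policy introduced above, here is a minimal userspace sketch (not mbcache code; struct cache, entry_alloc() and the LRU helpers are illustrative stand-ins, and locking, hashing and reference counting are omitted): once the entry count reaches the cap, the oldest entry on the LRU list is recycled instead of allocating a new one.

/*
 * Minimal userspace sketch of the reuse-at-limit policy (illustrative
 * names, no locking/hashing/refcounting): when the cache already holds
 * max_entries entries, recycle the oldest LRU entry instead of
 * allocating a new one.
 */
#include <stdio.h>
#include <stdlib.h>

struct entry {
        struct entry *lru_prev, *lru_next;      /* doubly linked LRU list */
        int data;
};

struct cache {
        struct entry lru;       /* list head; lru.lru_next is the oldest entry */
        int count;              /* entries ever allocated */
        int max_entries;        /* cap, analogous to c_max_entries */
};

/* Link e in as the newest (most recently used) entry. */
static void lru_add_newest(struct cache *c, struct entry *e)
{
        e->lru_next = &c->lru;
        e->lru_prev = c->lru.lru_prev;
        c->lru.lru_prev->lru_next = e;
        c->lru.lru_prev = e;
}

/* Unlink and return the oldest entry, or NULL if the list is empty. */
static struct entry *lru_del_oldest(struct cache *c)
{
        struct entry *e = c->lru.lru_next;

        if (e == &c->lru)
                return NULL;
        e->lru_prev->lru_next = e->lru_next;
        e->lru_next->lru_prev = e->lru_prev;
        return e;
}

static struct entry *entry_alloc(struct cache *c)
{
        struct entry *e = NULL;

        /* At the cap: try to recycle the least recently used entry. */
        if (c->count >= c->max_entries)
                e = lru_del_oldest(c);
        if (!e) {
                e = malloc(sizeof(*e));
                if (!e)
                        return NULL;
                c->count++;
        }
        lru_add_newest(c, e);
        return e;
}

int main(void)
{
        struct cache c = { .count = 0, .max_entries = 4 };
        struct entry *e;
        int i;

        c.lru.lru_next = c.lru.lru_prev = &c.lru;

        for (i = 0; i < 8; i++) {
                e = entry_alloc(&c);
                if (!e)
                        return 1;
                e->data = i;
        }
        printf("entries allocated: %d\n", c.count);     /* prints 4 */

        while ((e = lru_del_oldest(&c)) != NULL)
                free(e);
        return 0;
}

Running the sketch prints "entries allocated: 4": after the cap is hit, every further request is satisfied by recycling, mirroring how mb_cache_entry_alloc() only tries to take an entry from mb_cache_lru_list once c_entry_count has reached c_max_entries, and how both freshly allocated and recycled entries are returned with e_used set to 1 + MB_CACHE_WRITER.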