mbcache: Speed up cache entry creation

In order to prevent redundant entry creation when racing against itself,
mb_cache_entry_create scans through a large hash-list of all current
entries in order to see if another allocation for the requested new
entry has already been made. Furthermore, it allocates memory for a new
entry before scanning through this hash-list, which means the allocated
memory is discarded whenever the requested new entry turns out to
already be present. This happens more than half the time.

Speed up cache entry creation by keeping a small linked list of
requested new entries in progress, and scanning through that first
instead of the large hash-list. Additionally, don't bother allocating
memory for a new entry until it's known that the allocated memory will
be used.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Signed-off-by: Ruchit <ruchitmarathe@gmail.com>
Author: Sultan Alsawaf
Committer: Jenna
parent 899365ef6e
commit c1433d926f

 fs/mbcache.c | 81

diff --git a/fs/mbcache.c b/fs/mbcache.c
@@ -26,7 +26,7 @@
 struct mb_cache {
 	/* Hash table of entries */
-	struct hlist_bl_head *c_hash;
+	struct mb_bucket *c_bucket;
 	/* log2 of hash table size */
 	int c_bucket_bits;
 	/* Maximum entries in cache to avoid degrading hash too much */
 	unsigned long c_max_entries;
@@ -41,6 +41,17 @@ struct mb_cache {
 	struct work_struct c_shrink_work;
 };
 
+struct mb_bucket {
+	struct hlist_bl_head hash;
+	struct list_head req_list;
+};
+
+struct mb_cache_req {
+	struct list_head lnode;
+	u32 e_key;
+	u64 e_value;
+};
+
 static struct kmem_cache *mb_entry_cache;
 
 static unsigned long mb_cache_shrink(struct mb_cache *cache,
@@ -49,7 +60,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
 							u32 key)
 {
-	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+	return &cache->c_bucket[hash_32(key, cache->c_bucket_bits)].hash;
 }
 
 /*
@@ -76,6 +87,11 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 	struct mb_cache_entry *entry, *dup;
 	struct hlist_bl_node *dup_node;
 	struct hlist_bl_head *head;
+	struct mb_cache_req *tmp_req, req = {
+		.e_key = key,
+		.e_value = value
+	};
+	struct mb_bucket *bucket;
 
 	/* Schedule background reclaim if there are too many entries */
 	if (cache->c_entry_count >= cache->c_max_entries)
@@ -84,33 +100,48 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 	if (cache->c_entry_count >= 2*cache->c_max_entries)
 		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);
 
-	entry = kmem_cache_alloc(mb_entry_cache, mask);
-	if (!entry)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&entry->e_list);
-	/* One ref for hash, one ref returned */
-	atomic_set(&entry->e_refcnt, 1);
-	entry->e_key = key;
-	entry->e_value = value;
-	entry->e_reusable = reusable;
-	entry->e_referenced = 0;
-	head = mb_cache_entry_head(cache, key);
+	bucket = &cache->c_bucket[hash_32(key, cache->c_bucket_bits)];
+	head = &bucket->hash;
 	hlist_bl_lock(head);
+	list_for_each_entry(tmp_req, &bucket->req_list, lnode) {
+		if (tmp_req->e_key == key && tmp_req->e_value == value) {
+			hlist_bl_unlock(head);
+			return -EBUSY;
+		}
+	}
 	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
 		if (dup->e_key == key && dup->e_value == value) {
 			hlist_bl_unlock(head);
-			kmem_cache_free(mb_entry_cache, entry);
 			return -EBUSY;
 		}
 	}
+	list_add(&req.lnode, &bucket->req_list);
+	hlist_bl_unlock(head);
+
+	entry = kmem_cache_alloc(mb_entry_cache, mask);
+	if (!entry) {
+		hlist_bl_lock(head);
+		list_del(&req.lnode);
+		hlist_bl_unlock(head);
+		return -ENOMEM;
+	}
+
+	*entry = (typeof(*entry)){
+		.e_list = LIST_HEAD_INIT(entry->e_list),
+		/* One ref for hash, one ref returned */
+		.e_refcnt = ATOMIC_INIT(2),
+		.e_key = key,
+		.e_value = value,
+		.e_reusable = reusable
+	};
+
+	hlist_bl_lock(head);
+	list_del(&req.lnode);
 	hlist_bl_add_head(&entry->e_hash_list, head);
 	hlist_bl_unlock(head);
 
 	spin_lock(&cache->c_list_lock);
 	list_add_tail(&entry->e_list, &cache->c_list);
-	/* Grab ref for LRU list */
-	atomic_inc(&entry->e_refcnt);
 	cache->c_entry_count++;
 	spin_unlock(&cache->c_list_lock);
 
@@ -351,20 +382,22 @@ struct mb_cache *mb_cache_create(int bucket_bits)
 	cache->c_max_entries = bucket_count << 4;
 	INIT_LIST_HEAD(&cache->c_list);
 	spin_lock_init(&cache->c_list_lock);
-	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
-				GFP_KERNEL);
-	if (!cache->c_hash) {
+	cache->c_bucket = kmalloc(bucket_count * sizeof(*cache->c_bucket),
+				  GFP_KERNEL);
+	if (!cache->c_bucket) {
 		kfree(cache);
 		goto err_out;
 	}
-	for (i = 0; i < bucket_count; i++)
-		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
+	for (i = 0; i < bucket_count; i++) {
+		INIT_HLIST_BL_HEAD(&cache->c_bucket[i].hash);
+		INIT_LIST_HEAD(&cache->c_bucket[i].req_list);
+	}
 
 	cache->c_shrink.count_objects = mb_cache_count;
 	cache->c_shrink.scan_objects = mb_cache_scan;
 	cache->c_shrink.seeks = DEFAULT_SEEKS;
 	if (register_shrinker(&cache->c_shrink)) {
-		kfree(cache->c_hash);
+		kfree(cache->c_bucket);
 		kfree(cache);
 		goto err_out;
 	}
@@ -405,7 +438,7 @@ void mb_cache_destroy(struct mb_cache *cache)
 		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
 		mb_cache_entry_put(cache, entry);
 	}
-	kfree(cache->c_hash);
+	kfree(cache->c_bucket);
 	kfree(cache);
 }
 EXPORT_SYMBOL(mb_cache_destroy);
