/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE
/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64
static struct hlist_head	*cache_hash;
static struct list_head		lru_head;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int		max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int		maskbits;
/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */
/* total number of entries */
static unsigned int		num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int		payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int		drc_mem_usage;

/* longest hash chain seen */
static unsigned int		longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int		longest_chain_cachesize;
static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static int	nfsd_reply_cache_shrink(struct shrinker *shrink,
					struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.shrink	= nfsd_reply_cache_shrink,
	.seeks	= 1,
};
/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 * The cap scales with the square root of low memory, with a hard limit of
 * 256k entries. In the worst case, each entry will be ~1k, so this gives
 * a rough max of the amount of memory used in kilobytes.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}
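/*
 * Worked example (assuming 4k pages, i.e. PAGE_SHIFT == 12): with 1GB of
 * low memory, low_pages is 262144, int_sqrt() of that is 512, and
 * (16 * 512) << 2 gives a cap of 32768 entries. Under the same assumption,
 * the 256k hard cap is only reached at around 64GB of low memory.
 */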
/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
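/*
 * Continuing the example above: a 32768-entry cap works out to
 * 32768 / 64 = 512 buckets, which is already a power of two, so each
 * bucket averages TARGET_BUCKET_SIZE entries when the cache is full.
 */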
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
		INIT_HLIST_NODE(&rp->c_hash);
	}
	return rp;
}
static void
nfsd_reply_cache_unhash(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	list_del_init(&rp->c_lru);
}
static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	if (!hlist_unhashed(&rp->c_hash))
		hlist_del(&rp->c_hash);
	list_del(&rp->c_lru);
	--num_drc_entries;
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}
static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
	spin_lock(&cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&cache_lock);
}
int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;

	INIT_LIST_HEAD(&lru_head);
	max_drc_entries = nfsd_cache_size_limit();
	num_drc_entries = 0;
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL);
	if (!cache_hash)
		goto out_nomem;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}
void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	while (!list_empty(&lru_head)) {
		rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
		nfsd_reply_cache_free_locked(rp);
	}

	kfree(cache_hash);
	cache_hash = NULL;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}
/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}
/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits));
}
static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
	return rp->c_state != RC_INPROG &&
	       time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}
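/*
 * Note: RC_EXPIRE comes from cache.h (120 seconds' worth of jiffies at the
 * time of this writing). An RC_INPROG entry is never considered expired,
 * since the nfsd thread that owns it will still write a reply into it via
 * nfsd_cache_update().
 */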
/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static void
prune_cache_entries(void)
{
	struct svc_cacherep *rp, *tmp;

	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
		if (!nfsd_cache_entry_expired(rp) &&
		    num_drc_entries <= max_drc_entries)
			break;
		nfsd_reply_cache_free_locked(rp);
	}

	/*
	 * Conditionally rearm the job. If we cleaned out the list, then
	 * cancel any pending run (since there won't be any work to do).
	 * Otherwise, rearm the job or modify the existing one to run in
	 * RC_EXPIRE since we just ran the pruner.
	 */
	if (list_empty(&lru_head))
		cancel_delayed_work(&cache_cleaner);
	else
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}
static void
cache_cleaner_func(struct work_struct *unused)
{
	spin_lock(&cache_lock);
	prune_cache_entries();
	spin_unlock(&cache_lock);
}
static int
nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned int num;

	spin_lock(&cache_lock);
	/* nr_to_scan == 0 means the VM only wants the entry count */
	if (sc->nr_to_scan)
		prune_cache_entries();
	num = num_drc_entries;
	spin_unlock(&cache_lock);

	return num;
}
/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}
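/*
 * The walk above follows the standard sunrpc xdr_buf layout: a "head" kvec
 * holding the start of the request, then a page array whose first byte of
 * data may sit at a non-zero offset (page_base). Checksumming only the
 * first RC_CSUMLEN bytes keeps the per-lookup cost bounded while still
 * catching distinct requests that happen to reuse an xid.
 */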
static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC header info first */
	if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	return true;
}
/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
	struct svc_cacherep	*rp, *ret = NULL;
	struct hlist_head	*rh;
	unsigned int		entries = 0;

	rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)];
	hlist_for_each_entry(rp, rh, c_hash) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = num_drc_entries;
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min(longest_chain_cachesize,
						num_drc_entries);
	}

	return ret;
}
/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
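/*
 * The return value tells nfsd_dispatch() what to do with the request.
 * These codes are defined in cache.h: RC_DOIT (process the call normally),
 * RC_REPLY (a cached reply was composed, send it), and RC_DROPIT (drop the
 * request, e.g. because the original call is still in progress).
 */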
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry. First, try to reuse the first entry on the
	 * LRU if it works, then go ahead and prune the LRU list.
	 */
	spin_lock(&cache_lock);
	if (!list_empty(&lru_head)) {
		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
		if (nfsd_cache_entry_expired(rp) ||
		    num_drc_entries >= max_drc_entries) {
			nfsd_reply_cache_unhash(rp);
			prune_cache_entries();
			goto search_cache;
		}
	}

	/* No expired ones available, allocate a new one. */
	spin_unlock(&cache_lock);
	rp = nfsd_reply_cache_alloc();
	spin_lock(&cache_lock);
	if (likely(rp)) {
		++num_drc_entries;
		drc_mem_usage += sizeof(*rp);
	}

search_cache:
	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	/*
	 * We're keeping the one we just allocated. Are we now over the
	 * limit? Prune one off the tip of the LRU in trade for the one we
	 * just allocated if so.
	 */
	if (num_drc_entries >= max_drc_entries)
		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
						struct svc_cacherep, c_lru));

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	hash_refile(rp);
	lru_put_end(rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
out:
	spin_unlock(&cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}
/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;	/* bytes -> 32-bit XDR words */

	/* Don't cache excessive amounts of data and XDR failures;
	 * (256 >> 2) words caps cached replies at 256 bytes. */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(rp);
		return;
	}
	spin_lock(&cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&cache_lock);
}
/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}
/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	spin_lock(&cache_lock);
	seq_printf(m, "max entries:           %u\n", max_drc_entries);
	seq_printf(m, "num entries:           %u\n", num_drc_entries);
	seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses:        %u\n", payload_misses);
	seq_printf(m, "longest chain len:     %u\n", longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
	spin_unlock(&cache_lock);
	return 0;
}
int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}
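/*
 * These stats are exposed through nfsdfs, typically mounted under
 * /proc/fs/nfsd, so something like
 *
 *	cat /proc/fs/nfsd/reply_cache_stats
 *
 * should show the current state of the DRC (the exact filename depends on
 * where nfsd_reply_cache_stats_open is wired into the filesystem).
 */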