net/sunrpc/cache.c


DEFINITIONS

This source file includes the following definitions.
  1. cache_init
  2. sunrpc_cache_find_rcu
  3. sunrpc_cache_add_entry
  4. sunrpc_cache_lookup_rcu
  5. cache_fresh_locked
  6. cache_fresh_unlocked
  7. sunrpc_cache_update
  8. cache_make_upcall
  9. cache_is_valid
  10. try_to_negate_entry
  11. cache_check
  12. sunrpc_init_cache_detail
  13. sunrpc_destroy_cache_detail
  14. cache_clean
  15. do_cache_clean
  16. cache_flush
  17. cache_purge
  18. __unhash_deferred_req
  19. __hash_deferred_req
  20. setup_deferral
  21. cache_restart_thread
  22. cache_wait_req
  23. cache_limit_defers
  24. cache_defer_req
  25. cache_revisit_request
  26. cache_clean_deferred
  27. cache_request
  28. cache_read
  29. cache_do_downcall
  30. cache_slow_downcall
  31. cache_downcall
  32. cache_write
  33. cache_poll
  34. cache_ioctl
  35. cache_open
  36. cache_release
  37. cache_dequeue
  38. qword_add
  39. qword_addhex
  40. warn_no_listener
  41. cache_listeners_exist
  42. sunrpc_cache_pipe_upcall
  43. qword_get
  44. __cache_seq_start
  45. cache_seq_next
  46. cache_seq_start_rcu
  47. cache_seq_next_rcu
  48. cache_seq_stop_rcu
  49. c_show
  50. content_open
  51. content_release
  52. open_flush
  53. release_flush
  54. read_flush
  55. write_flush
  56. cache_read_procfs
  57. cache_write_procfs
  58. cache_poll_procfs
  59. cache_ioctl_procfs
  60. cache_open_procfs
  61. cache_release_procfs
  62. content_open_procfs
  63. content_release_procfs
  64. open_flush_procfs
  65. release_flush_procfs
  66. read_flush_procfs
  67. write_flush_procfs
  68. remove_cache_proc_entries
  69. create_cache_proc_entries
  70. create_cache_proc_entries
  71. cache_initialize
  72. cache_register_net
  73. cache_unregister_net
  74. cache_create_net
  75. cache_destroy_net
  76. cache_read_pipefs
  77. cache_write_pipefs
  78. cache_poll_pipefs
  79. cache_ioctl_pipefs
  80. cache_open_pipefs
  81. cache_release_pipefs
  82. content_open_pipefs
  83. content_release_pipefs
  84. open_flush_pipefs
  85. release_flush_pipefs
  86. read_flush_pipefs
  87. write_flush_pipefs
  88. sunrpc_cache_register_pipefs
  89. sunrpc_cache_unregister_pipefs
  90. sunrpc_cache_unhash

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * net/sunrpc/cache.c
   4  *
   5  * Generic code for various authentication-related caches
   6  * used by sunrpc clients and servers.
   7  *
   8  * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
   9  */
  10 
  11 #include <linux/types.h>
  12 #include <linux/fs.h>
  13 #include <linux/file.h>
  14 #include <linux/slab.h>
  15 #include <linux/signal.h>
  16 #include <linux/sched.h>
  17 #include <linux/kmod.h>
  18 #include <linux/list.h>
  19 #include <linux/module.h>
  20 #include <linux/ctype.h>
  21 #include <linux/string_helpers.h>
  22 #include <linux/uaccess.h>
  23 #include <linux/poll.h>
  24 #include <linux/seq_file.h>
  25 #include <linux/proc_fs.h>
  26 #include <linux/net.h>
  27 #include <linux/workqueue.h>
  28 #include <linux/mutex.h>
  29 #include <linux/pagemap.h>
  30 #include <asm/ioctls.h>
  31 #include <linux/sunrpc/types.h>
  32 #include <linux/sunrpc/cache.h>
  33 #include <linux/sunrpc/stats.h>
  34 #include <linux/sunrpc/rpc_pipe_fs.h>
  35 #include "netns.h"
  36 
  37 #define  RPCDBG_FACILITY RPCDBG_CACHE
  38 
  39 static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
  40 static void cache_revisit_request(struct cache_head *item);
  41 static bool cache_listeners_exist(struct cache_detail *detail);
  42 
  43 static void cache_init(struct cache_head *h, struct cache_detail *detail)
  44 {
  45         time_t now = seconds_since_boot();
  46         INIT_HLIST_NODE(&h->cache_list);
  47         h->flags = 0;
  48         kref_init(&h->ref);
  49         h->expiry_time = now + CACHE_NEW_EXPIRY;
  50         if (now <= detail->flush_time)
  51                 /* ensure it isn't already expired */
  52                 now = detail->flush_time + 1;
  53         h->last_refresh = now;
  54 }
  55 
  56 static void cache_fresh_unlocked(struct cache_head *head,
  57                                 struct cache_detail *detail);
  58 
  59 static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
  60                                                 struct cache_head *key,
  61                                                 int hash)
  62 {
  63         struct hlist_head *head = &detail->hash_table[hash];
  64         struct cache_head *tmp;
  65 
  66         rcu_read_lock();
  67         hlist_for_each_entry_rcu(tmp, head, cache_list) {
  68                 if (detail->match(tmp, key)) {
  69                         if (cache_is_expired(detail, tmp))
  70                                 continue;
  71                         tmp = cache_get_rcu(tmp);
  72                         rcu_read_unlock();
  73                         return tmp;
  74                 }
  75         }
  76         rcu_read_unlock();
  77         return NULL;
  78 }
  79 
  80 static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
  81                                                  struct cache_head *key,
  82                                                  int hash)
  83 {
  84         struct cache_head *new, *tmp, *freeme = NULL;
  85         struct hlist_head *head = &detail->hash_table[hash];
  86 
  87         new = detail->alloc();
  88         if (!new)
  89                 return NULL;
   90         /* must fully initialise 'new', else
   91          * we might lose it if we need to
   92          * cache_put it soon.
   93          */
  94         cache_init(new, detail);
  95         detail->init(new, key);
  96 
  97         spin_lock(&detail->hash_lock);
  98 
  99         /* check if entry appeared while we slept */
 100         hlist_for_each_entry_rcu(tmp, head, cache_list) {
 101                 if (detail->match(tmp, key)) {
 102                         if (cache_is_expired(detail, tmp)) {
 103                                 hlist_del_init_rcu(&tmp->cache_list);
  104                         detail->entries--;
 105                                 freeme = tmp;
 106                                 break;
 107                         }
 108                         cache_get(tmp);
 109                         spin_unlock(&detail->hash_lock);
 110                         cache_put(new, detail);
 111                         return tmp;
 112                 }
 113         }
 114 
 115         hlist_add_head_rcu(&new->cache_list, head);
 116         detail->entries++;
 117         cache_get(new);
 118         spin_unlock(&detail->hash_lock);
 119 
 120         if (freeme) {
 121                 cache_fresh_unlocked(freeme, detail);
 122                 cache_put(freeme, detail);
 123         }
 124         return new;
 125 }
 126 
 127 struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
 128                                            struct cache_head *key, int hash)
 129 {
 130         struct cache_head *ret;
 131 
 132         ret = sunrpc_cache_find_rcu(detail, key, hash);
 133         if (ret)
 134                 return ret;
 135         /* Didn't find anything, insert an empty entry */
 136         return sunrpc_cache_add_entry(detail, key, hash);
 137 }
 138 EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);
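
/*
 * Illustrative sketch (not part of this file): how a cache built on the
 * API above might wrap sunrpc_cache_lookup_rcu().  'struct demo_ent',
 * demo_lookup() and the hash choice are hypothetical; a real cache embeds
 * cache_head in its own entry type and supplies ->match/->init/->update/
 * ->alloc in its cache_detail (compare ip_map or the export caches).
 */
#if 0
struct demo_ent {
        struct cache_head h;            /* embedded cache_head */
        char *name;                     /* the lookup key */
        int value;                      /* the cached datum */
};

static struct demo_ent *demo_lookup(struct cache_detail *cd, char *name)
{
        struct demo_ent key;
        struct cache_head *ch;
        int hash = hash_str(name, 8);   /* assumes cd->hash_size == 256 */

        key.name = name;                /* only what ->match() examines */
        ch = sunrpc_cache_lookup_rcu(cd, &key.h, hash);
        if (!ch)
                return NULL;            /* allocation failed */
        return container_of(ch, struct demo_ent, h);    /* ref held */
}
#endif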
 139 
 140 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
 141 
 142 static void cache_fresh_locked(struct cache_head *head, time_t expiry,
 143                                struct cache_detail *detail)
 144 {
 145         time_t now = seconds_since_boot();
 146         if (now <= detail->flush_time)
 147                 /* ensure it isn't immediately treated as expired */
 148                 now = detail->flush_time + 1;
 149         head->expiry_time = expiry;
 150         head->last_refresh = now;
 151         smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
 152         set_bit(CACHE_VALID, &head->flags);
 153 }
 154 
 155 static void cache_fresh_unlocked(struct cache_head *head,
 156                                  struct cache_detail *detail)
 157 {
 158         if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
 159                 cache_revisit_request(head);
 160                 cache_dequeue(detail, head);
 161         }
 162 }
 163 
 164 struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
 165                                        struct cache_head *new, struct cache_head *old, int hash)
 166 {
 167         /* The 'old' entry is to be replaced by 'new'.
 168          * If 'old' is not VALID, we update it directly,
 169          * otherwise we need to replace it
 170          */
 171         struct cache_head *tmp;
 172 
 173         if (!test_bit(CACHE_VALID, &old->flags)) {
 174                 spin_lock(&detail->hash_lock);
 175                 if (!test_bit(CACHE_VALID, &old->flags)) {
 176                         if (test_bit(CACHE_NEGATIVE, &new->flags))
 177                                 set_bit(CACHE_NEGATIVE, &old->flags);
 178                         else
 179                                 detail->update(old, new);
 180                         cache_fresh_locked(old, new->expiry_time, detail);
 181                         spin_unlock(&detail->hash_lock);
 182                         cache_fresh_unlocked(old, detail);
 183                         return old;
 184                 }
 185                 spin_unlock(&detail->hash_lock);
 186         }
 187         /* We need to insert a new entry */
 188         tmp = detail->alloc();
 189         if (!tmp) {
 190                 cache_put(old, detail);
 191                 return NULL;
 192         }
 193         cache_init(tmp, detail);
 194         detail->init(tmp, old);
 195 
 196         spin_lock(&detail->hash_lock);
 197         if (test_bit(CACHE_NEGATIVE, &new->flags))
 198                 set_bit(CACHE_NEGATIVE, &tmp->flags);
 199         else
 200                 detail->update(tmp, new);
 201         hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
 202         detail->entries++;
 203         cache_get(tmp);
 204         cache_fresh_locked(tmp, new->expiry_time, detail);
 205         cache_fresh_locked(old, 0, detail);
 206         spin_unlock(&detail->hash_lock);
 207         cache_fresh_unlocked(tmp, detail);
 208         cache_fresh_unlocked(old, detail);
 209         cache_put(old, detail);
 210         return tmp;
 211 }
 212 EXPORT_SYMBOL_GPL(sunrpc_cache_update);
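
/*
 * Illustrative sketch (not part of this file): replacing an entry's
 * content, as a downcall parser typically does after parsing a reply.
 * Reuses the hypothetical demo_ent from the lookup sketch above; 'new'
 * is a temporary entry carrying the fresh content and expiry.
 */
#if 0
static struct demo_ent *demo_update(struct cache_detail *cd,
                                    struct demo_ent *new,
                                    struct demo_ent *old, int hash)
{
        struct cache_head *ch;

        ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
        if (!ch)
                return NULL;    /* allocation failed; 'old' already put */
        return container_of(ch, struct demo_ent, h);
}
#endif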
 213 
 214 static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
 215 {
 216         if (cd->cache_upcall)
 217                 return cd->cache_upcall(cd, h);
 218         return sunrpc_cache_pipe_upcall(cd, h);
 219 }
 220 
 221 static inline int cache_is_valid(struct cache_head *h)
 222 {
 223         if (!test_bit(CACHE_VALID, &h->flags))
 224                 return -EAGAIN;
 225         else {
 226                 /* entry is valid */
 227                 if (test_bit(CACHE_NEGATIVE, &h->flags))
 228                         return -ENOENT;
 229                 else {
 230                         /*
 231                          * In combination with write barrier in
 232                          * sunrpc_cache_update, ensures that anyone
 233                          * using the cache entry after this sees the
 234                          * updated contents:
 235                          */
 236                         smp_rmb();
 237                         return 0;
 238                 }
 239         }
 240 }
 241 
 242 static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
 243 {
 244         int rv;
 245 
 246         spin_lock(&detail->hash_lock);
 247         rv = cache_is_valid(h);
 248         if (rv == -EAGAIN) {
 249                 set_bit(CACHE_NEGATIVE, &h->flags);
 250                 cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
 251                                    detail);
 252                 rv = -ENOENT;
 253         }
 254         spin_unlock(&detail->hash_lock);
 255         cache_fresh_unlocked(h, detail);
 256         return rv;
 257 }
 258 
 259 /*
 260  * This is the generic cache management routine for all
 261  * the authentication caches.
 262  * It checks the currency of a cache item and will (later)
 263  * initiate an upcall to fill it if needed.
 264  *
 265  *
 266  * Returns 0 if the cache_head can be used, or cache_puts it and returns
 267  * -EAGAIN if upcall is pending and request has been queued
 268  * -ETIMEDOUT if upcall failed or request could not be queued or
 269  *           upcall completed but item is still invalid (implying that
 270  *           the cache item has been replaced with a newer one).
 271  * -ENOENT if cache entry was negative
 272  */
 273 int cache_check(struct cache_detail *detail,
 274                     struct cache_head *h, struct cache_req *rqstp)
 275 {
 276         int rv;
 277         long refresh_age, age;
 278 
 279         /* First decide return status as best we can */
 280         rv = cache_is_valid(h);
 281 
 282         /* now see if we want to start an upcall */
 283         refresh_age = (h->expiry_time - h->last_refresh);
 284         age = seconds_since_boot() - h->last_refresh;
 285 
 286         if (rqstp == NULL) {
 287                 if (rv == -EAGAIN)
 288                         rv = -ENOENT;
 289         } else if (rv == -EAGAIN ||
 290                    (h->expiry_time != 0 && age > refresh_age/2)) {
 291                 dprintk("RPC:       Want update, refage=%ld, age=%ld\n",
 292                                 refresh_age, age);
 293                 if (!test_and_set_bit(CACHE_PENDING, &h->flags)) {
 294                         switch (cache_make_upcall(detail, h)) {
 295                         case -EINVAL:
 296                                 rv = try_to_negate_entry(detail, h);
 297                                 break;
 298                         case -EAGAIN:
 299                                 cache_fresh_unlocked(h, detail);
 300                                 break;
 301                         }
 302                 } else if (!cache_listeners_exist(detail))
 303                         rv = try_to_negate_entry(detail, h);
 304         }
 305 
 306         if (rv == -EAGAIN) {
 307                 if (!cache_defer_req(rqstp, h)) {
 308                         /*
 309                          * Request was not deferred; handle it as best
 310                          * we can ourselves:
 311                          */
 312                         rv = cache_is_valid(h);
 313                         if (rv == -EAGAIN)
 314                                 rv = -ETIMEDOUT;
 315                 }
 316         }
 317         if (rv)
 318                 cache_put(h, detail);
 319         return rv;
 320 }
 321 EXPORT_SYMBOL_GPL(cache_check);
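
/*
 * Illustrative sketch (not part of this file): a request handler
 * consuming cache_check().  The cache_req would normally be the
 * rq_chandle embedded in a svc_rqst; demo names are hypothetical.
 */
#if 0
static int demo_use(struct cache_detail *cd, struct demo_ent *ent,
                    struct cache_req *creq)
{
        switch (cache_check(cd, &ent->h, creq)) {
        case 0:
                return ent->value;      /* valid; caller must cache_put() */
        case -EAGAIN:
                return -EAGAIN;         /* deferred; retried on revisit */
        case -ENOENT:                   /* negative entry */
        case -ETIMEDOUT:                /* upcall failed or timed out */
        default:
                return -1;              /* cache_check dropped our ref */
        }
}
#endif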
 322 
 323 /*
 324  * caches need to be periodically cleaned.
 325  * For this we maintain a list of cache_detail and
 326  * a current pointer into that list and into the table
 327  * for that entry.
 328  *
 329  * Each time cache_clean is called it finds the next non-empty entry
 330  * in the current table and walks the list in that entry
 331  * looking for entries that can be removed.
 332  *
 333  * An entry gets removed if:
 334  * - The expiry is before current time
 335  * - The last_refresh time is before the flush_time for that cache
 336  *
 337  * later we might drop old entries with non-NEVER expiry if that table
 338  * is getting 'full' for some definition of 'full'
 339  *
 340  * The question of "how often to scan a table" is an interesting one
 341  * and is answered in part by the use of the "nextcheck" field in the
 342  * cache_detail.
 343  * When a scan of a table begins, the nextcheck field is set to a time
 344  * that is well into the future.
 345  * While scanning, if an expiry time is found that is earlier than the
 346  * current nextcheck time, nextcheck is set to that expiry time.
 347  * If the flush_time is ever set to a time earlier than the nextcheck
 348  * time, the nextcheck time is then set to that flush_time.
 349  *
 350  * A table is then only scanned if the current time is at least
 351  * the nextcheck time.
 352  *
 353  */
 354 
 355 static LIST_HEAD(cache_list);
 356 static DEFINE_SPINLOCK(cache_list_lock);
 357 static struct cache_detail *current_detail;
 358 static int current_index;
 359 
 360 static void do_cache_clean(struct work_struct *work);
 361 static struct delayed_work cache_cleaner;
 362 
 363 void sunrpc_init_cache_detail(struct cache_detail *cd)
 364 {
 365         spin_lock_init(&cd->hash_lock);
 366         INIT_LIST_HEAD(&cd->queue);
 367         spin_lock(&cache_list_lock);
 368         cd->nextcheck = 0;
 369         cd->entries = 0;
 370         atomic_set(&cd->writers, 0);
 371         cd->last_close = 0;
 372         cd->last_warn = -1;
 373         list_add(&cd->others, &cache_list);
 374         spin_unlock(&cache_list_lock);
 375 
 376         /* start the cleaning process */
 377         queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
 378 }
 379 EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);
 380 
 381 void sunrpc_destroy_cache_detail(struct cache_detail *cd)
 382 {
 383         cache_purge(cd);
 384         spin_lock(&cache_list_lock);
 385         spin_lock(&cd->hash_lock);
 386         if (current_detail == cd)
 387                 current_detail = NULL;
 388         list_del_init(&cd->others);
 389         spin_unlock(&cd->hash_lock);
 390         spin_unlock(&cache_list_lock);
 391         if (list_empty(&cache_list)) {
 392                 /* module must be being unloaded so it's safe to kill the worker */
 393                 cancel_delayed_work_sync(&cache_cleaner);
 394         }
 395 }
 396 EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);
 397 
 398 /* clean cache tries to find something to clean
 399  * and cleans it.
 400  * It returns 1 if it cleaned something,
 401  *            0 if it didn't find anything this time
 402  *           -1 if it fell off the end of the list.
 403  */
 404 static int cache_clean(void)
 405 {
 406         int rv = 0;
 407         struct list_head *next;
 408 
 409         spin_lock(&cache_list_lock);
 410 
 411         /* find a suitable table if we don't already have one */
 412         while (current_detail == NULL ||
 413             current_index >= current_detail->hash_size) {
 414                 if (current_detail)
 415                         next = current_detail->others.next;
 416                 else
 417                         next = cache_list.next;
 418                 if (next == &cache_list) {
 419                         current_detail = NULL;
 420                         spin_unlock(&cache_list_lock);
 421                         return -1;
 422                 }
 423                 current_detail = list_entry(next, struct cache_detail, others);
 424                 if (current_detail->nextcheck > seconds_since_boot())
 425                         current_index = current_detail->hash_size;
 426                 else {
 427                         current_index = 0;
 428                         current_detail->nextcheck = seconds_since_boot()+30*60;
 429                 }
 430         }
 431 
 432         /* find a non-empty bucket in the table */
 433         while (current_detail &&
 434                current_index < current_detail->hash_size &&
 435                hlist_empty(&current_detail->hash_table[current_index]))
 436                 current_index++;
 437 
 438         /* find a cleanable entry in the bucket and clean it, or set to next bucket */
 439 
 440         if (current_detail && current_index < current_detail->hash_size) {
 441                 struct cache_head *ch = NULL;
 442                 struct cache_detail *d;
 443                 struct hlist_head *head;
 444                 struct hlist_node *tmp;
 445 
 446                 spin_lock(&current_detail->hash_lock);
 447 
 448                 /* Ok, now to clean this strand */
 449 
 450                 head = &current_detail->hash_table[current_index];
 451                 hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
 452                         if (current_detail->nextcheck > ch->expiry_time)
 453                                 current_detail->nextcheck = ch->expiry_time+1;
 454                         if (!cache_is_expired(current_detail, ch))
 455                                 continue;
 456 
 457                         hlist_del_init_rcu(&ch->cache_list);
 458                         current_detail->entries--;
 459                         rv = 1;
 460                         break;
 461                 }
 462 
 463                 spin_unlock(&current_detail->hash_lock);
 464                 d = current_detail;
 465                 if (!ch)
 466                         current_index++;
 467                 spin_unlock(&cache_list_lock);
 468                 if (ch) {
 469                         set_bit(CACHE_CLEANED, &ch->flags);
 470                         cache_fresh_unlocked(ch, d);
 471                         cache_put(ch, d);
 472                 }
 473         } else
 474                 spin_unlock(&cache_list_lock);
 475 
 476         return rv;
 477 }
 478 
 479 /*
 480  * We want to regularly clean the cache, so we need to schedule some work ...
 481  */
 482 static void do_cache_clean(struct work_struct *work)
 483 {
 484         int delay = 5;
 485         if (cache_clean() == -1)
 486                 delay = round_jiffies_relative(30*HZ);
 487 
 488         if (list_empty(&cache_list))
 489                 delay = 0;
 490 
 491         if (delay)
 492                 queue_delayed_work(system_power_efficient_wq,
 493                                    &cache_cleaner, delay);
 494 }
 495 
 496 
 497 /*
 498  * Clean all caches promptly.  This just calls cache_clean
 499  * repeatedly until we are sure that every cache has had a chance to
 500  * be fully cleaned
 501  */
 502 void cache_flush(void)
 503 {
 504         while (cache_clean() != -1)
 505                 cond_resched();
 506         while (cache_clean() != -1)
 507                 cond_resched();
 508 }
 509 EXPORT_SYMBOL_GPL(cache_flush);
 510 
 511 void cache_purge(struct cache_detail *detail)
 512 {
 513         struct cache_head *ch = NULL;
 514         struct hlist_head *head = NULL;
 515         struct hlist_node *tmp = NULL;
 516         int i = 0;
 517 
 518         spin_lock(&detail->hash_lock);
 519         if (!detail->entries) {
 520                 spin_unlock(&detail->hash_lock);
 521                 return;
 522         }
 523 
 524         dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
 525         for (i = 0; i < detail->hash_size; i++) {
 526                 head = &detail->hash_table[i];
 527                 hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
 528                         hlist_del_init_rcu(&ch->cache_list);
 529                         detail->entries--;
 530 
 531                         set_bit(CACHE_CLEANED, &ch->flags);
 532                         spin_unlock(&detail->hash_lock);
 533                         cache_fresh_unlocked(ch, detail);
 534                         cache_put(ch, detail);
 535                         spin_lock(&detail->hash_lock);
 536                 }
 537         }
 538         spin_unlock(&detail->hash_lock);
 539 }
 540 EXPORT_SYMBOL_GPL(cache_purge);
 541 
 542 
 543 /*
 544  * Deferral and Revisiting of Requests.
 545  *
 546  * If a cache lookup finds a pending entry, we
 547  * need to defer the request and revisit it later.
 548  * All deferred requests are stored in a hash table,
 549  * indexed by "struct cache_head *".
 550  * As it may be wasteful to store a whole request
 551  * structure, we allow the request to provide a
 552  * deferred form, which must contain a
 553  * 'struct cache_deferred_req'
 554  * This cache_deferred_req contains a method to allow
 555  * it to be revisited when cache info is available
 556  */
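
/*
 * Illustrative sketch (not part of this file): the shape of a ->defer()
 * provider and its ->revisit() callback.  Real transports (see svc_defer
 * in svc_xprt.c) snapshot enough of the request to replay it; everything
 * named demo_* here is hypothetical and the replay step is elided.
 */
#if 0
struct demo_deferred {
        struct cache_deferred_req handle;       /* what gets queued here */
        /* ... saved request state needed for replay ... */
};

static void demo_revisit(struct cache_deferred_req *dreq, int too_many)
{
        struct demo_deferred *dd =
                container_of(dreq, struct demo_deferred, handle);

        if (too_many) {
                kfree(dd);      /* dropped by cache_limit_defers() */
                return;
        }
        /* hand the saved state back to the transport for a retry;
         * it is freed once the request has been replayed */
}

static struct cache_deferred_req *demo_defer(struct cache_req *req)
{
        struct demo_deferred *dd = kmalloc(sizeof(*dd), GFP_KERNEL);

        if (!dd)
                return NULL;    /* makes cache_defer_req() return false */
        dd->handle.owner = NULL; /* a tag matched by cache_clean_deferred() */
        dd->handle.revisit = demo_revisit;
        return &dd->handle;
}
#endif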
 557 
 558 #define DFR_HASHSIZE    (PAGE_SIZE/sizeof(struct list_head))
 559 #define DFR_HASH(item)  ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)
 560 
 561 #define DFR_MAX 300     /* ??? */
 562 
 563 static DEFINE_SPINLOCK(cache_defer_lock);
 564 static LIST_HEAD(cache_defer_list);
 565 static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
 566 static int cache_defer_cnt;
 567 
 568 static void __unhash_deferred_req(struct cache_deferred_req *dreq)
 569 {
 570         hlist_del_init(&dreq->hash);
 571         if (!list_empty(&dreq->recent)) {
 572                 list_del_init(&dreq->recent);
 573                 cache_defer_cnt--;
 574         }
 575 }
 576 
 577 static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
 578 {
 579         int hash = DFR_HASH(item);
 580 
 581         INIT_LIST_HEAD(&dreq->recent);
 582         hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
 583 }
 584 
 585 static void setup_deferral(struct cache_deferred_req *dreq,
 586                            struct cache_head *item,
 587                            int count_me)
 588 {
 589 
 590         dreq->item = item;
 591 
 592         spin_lock(&cache_defer_lock);
 593 
 594         __hash_deferred_req(dreq, item);
 595 
 596         if (count_me) {
 597                 cache_defer_cnt++;
 598                 list_add(&dreq->recent, &cache_defer_list);
 599         }
 600 
 601         spin_unlock(&cache_defer_lock);
 602 
 603 }
 604 
 605 struct thread_deferred_req {
 606         struct cache_deferred_req handle;
 607         struct completion completion;
 608 };
 609 
 610 static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
 611 {
 612         struct thread_deferred_req *dr =
 613                 container_of(dreq, struct thread_deferred_req, handle);
 614         complete(&dr->completion);
 615 }
 616 
 617 static void cache_wait_req(struct cache_req *req, struct cache_head *item)
 618 {
 619         struct thread_deferred_req sleeper;
 620         struct cache_deferred_req *dreq = &sleeper.handle;
 621 
 622         sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
 623         dreq->revisit = cache_restart_thread;
 624 
 625         setup_deferral(dreq, item, 0);
 626 
 627         if (!test_bit(CACHE_PENDING, &item->flags) ||
 628             wait_for_completion_interruptible_timeout(
 629                     &sleeper.completion, req->thread_wait) <= 0) {
 630                 /* The completion wasn't completed, so we need
 631                  * to clean up
 632                  */
 633                 spin_lock(&cache_defer_lock);
 634                 if (!hlist_unhashed(&sleeper.handle.hash)) {
 635                         __unhash_deferred_req(&sleeper.handle);
 636                         spin_unlock(&cache_defer_lock);
 637                 } else {
 638                         /* cache_revisit_request already removed
 639                          * this from the hash table, but hasn't
 640                          * called ->revisit yet.  It will very soon
 641                          * and we need to wait for it.
 642                          */
 643                         spin_unlock(&cache_defer_lock);
 644                         wait_for_completion(&sleeper.completion);
 645                 }
 646         }
 647 }
 648 
 649 static void cache_limit_defers(void)
 650 {
 651         /* Make sure we haven't exceeded the limit of allowed deferred
 652          * requests.
 653          */
 654         struct cache_deferred_req *discard = NULL;
 655 
 656         if (cache_defer_cnt <= DFR_MAX)
 657                 return;
 658 
 659         spin_lock(&cache_defer_lock);
 660 
 661         /* Consider removing either the first or the last */
 662         if (cache_defer_cnt > DFR_MAX) {
 663                 if (prandom_u32() & 1)
 664                         discard = list_entry(cache_defer_list.next,
 665                                              struct cache_deferred_req, recent);
 666                 else
 667                         discard = list_entry(cache_defer_list.prev,
 668                                              struct cache_deferred_req, recent);
 669                 __unhash_deferred_req(discard);
 670         }
 671         spin_unlock(&cache_defer_lock);
 672         if (discard)
 673                 discard->revisit(discard, 1);
 674 }
 675 
 676 /* Return true if and only if a deferred request is queued. */
 677 static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
 678 {
 679         struct cache_deferred_req *dreq;
 680 
 681         if (req->thread_wait) {
 682                 cache_wait_req(req, item);
 683                 if (!test_bit(CACHE_PENDING, &item->flags))
 684                         return false;
 685         }
 686         dreq = req->defer(req);
 687         if (dreq == NULL)
 688                 return false;
 689         setup_deferral(dreq, item, 1);
 690         if (!test_bit(CACHE_PENDING, &item->flags))
 691                 /* Bit could have been cleared before we managed to
 692                  * set up the deferral, so need to revisit just in case
 693                  */
 694                 cache_revisit_request(item);
 695 
 696         cache_limit_defers();
 697         return true;
 698 }
 699 
 700 static void cache_revisit_request(struct cache_head *item)
 701 {
 702         struct cache_deferred_req *dreq;
 703         struct list_head pending;
 704         struct hlist_node *tmp;
 705         int hash = DFR_HASH(item);
 706 
 707         INIT_LIST_HEAD(&pending);
 708         spin_lock(&cache_defer_lock);
 709 
 710         hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
 711                 if (dreq->item == item) {
 712                         __unhash_deferred_req(dreq);
 713                         list_add(&dreq->recent, &pending);
 714                 }
 715 
 716         spin_unlock(&cache_defer_lock);
 717 
 718         while (!list_empty(&pending)) {
 719                 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
 720                 list_del_init(&dreq->recent);
 721                 dreq->revisit(dreq, 0);
 722         }
 723 }
 724 
 725 void cache_clean_deferred(void *owner)
 726 {
 727         struct cache_deferred_req *dreq, *tmp;
 728         struct list_head pending;
 729 
 730 
 731         INIT_LIST_HEAD(&pending);
 732         spin_lock(&cache_defer_lock);
 733 
 734         list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
 735                 if (dreq->owner == owner) {
 736                         __unhash_deferred_req(dreq);
 737                         list_add(&dreq->recent, &pending);
 738                 }
 739         }
 740         spin_unlock(&cache_defer_lock);
 741 
 742         while (!list_empty(&pending)) {
 743                 dreq = list_entry(pending.next, struct cache_deferred_req, recent);
 744                 list_del_init(&dreq->recent);
 745                 dreq->revisit(dreq, 1);
 746         }
 747 }
 748 
 749 /*
 750  * communicate with user-space
 751  *
 752  * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 753  * On read, you get a full request, or block.
 754  * On write, an update request is processed.
 755  * Poll works if anything to read, and always allows write.
 756  *
 757  * Implemented by linked list of requests.  Each open file has
 758  * a ->private that also exists in this list.  New requests are added
 759  * to the end and may wake up any preceding readers.
 760  * New readers are added to the head.  If, on read, an item is found with
 761  * CACHE_UPCALLING clear, we free it from the list.
 762  *
 763  */
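
/*
 * An illustrative exchange on such a channel (made-up values, loosely
 * modelled on the auth.unix.ip cache; the exact fields are defined by
 * each cache's ->cache_request and ->cache_parse):
 *
 *   read():  "nfsd 127.0.0.1\n"
 *            - the kernel asks user-space about one key
 *   write(): "nfsd 127.0.0.1 2147483647 localhost\n"
 *            - the daemon replies with the key, an expiry time and
 *              the content, completing the pending upcall
 */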
 764 
 765 static DEFINE_SPINLOCK(queue_lock);
 766 static DEFINE_MUTEX(queue_io_mutex);
 767 
 768 struct cache_queue {
 769         struct list_head        list;
 770         int                     reader; /* if 0, then request */
 771 };
 772 struct cache_request {
 773         struct cache_queue      q;
 774         struct cache_head       *item;
 775         char                    *buf;
 776         int                     len;
 777         int                     readers;
 778 };
 779 struct cache_reader {
 780         struct cache_queue      q;
 781         int                     offset; /* if non-0, we have a refcnt on next request */
 782 };
 783 
 784 static int cache_request(struct cache_detail *detail,
 785                                struct cache_request *crq)
 786 {
 787         char *bp = crq->buf;
 788         int len = PAGE_SIZE;
 789 
 790         detail->cache_request(detail, crq->item, &bp, &len);
 791         if (len < 0)
 792                 return -EAGAIN;
 793         return PAGE_SIZE - len;
 794 }
 795 
 796 static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
 797                           loff_t *ppos, struct cache_detail *cd)
 798 {
 799         struct cache_reader *rp = filp->private_data;
 800         struct cache_request *rq;
 801         struct inode *inode = file_inode(filp);
 802         int err;
 803 
 804         if (count == 0)
 805                 return 0;
 806 
 807         inode_lock(inode); /* protect against multiple concurrent
 808                               * readers on this file */
 809  again:
 810         spin_lock(&queue_lock);
 811         /* need to find next request */
 812         while (rp->q.list.next != &cd->queue &&
 813                list_entry(rp->q.list.next, struct cache_queue, list)
 814                ->reader) {
 815                 struct list_head *next = rp->q.list.next;
 816                 list_move(&rp->q.list, next);
 817         }
 818         if (rp->q.list.next == &cd->queue) {
 819                 spin_unlock(&queue_lock);
 820                 inode_unlock(inode);
 821                 WARN_ON_ONCE(rp->offset);
 822                 return 0;
 823         }
 824         rq = container_of(rp->q.list.next, struct cache_request, q.list);
 825         WARN_ON_ONCE(rq->q.reader);
 826         if (rp->offset == 0)
 827                 rq->readers++;
 828         spin_unlock(&queue_lock);
 829 
 830         if (rq->len == 0) {
 831                 err = cache_request(cd, rq);
 832                 if (err < 0)
 833                         goto out;
 834                 rq->len = err;
 835         }
 836 
 837         if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
 838                 err = -EAGAIN;
 839                 spin_lock(&queue_lock);
 840                 list_move(&rp->q.list, &rq->q.list);
 841                 spin_unlock(&queue_lock);
 842         } else {
 843                 if (rp->offset + count > rq->len)
 844                         count = rq->len - rp->offset;
 845                 err = -EFAULT;
 846                 if (copy_to_user(buf, rq->buf + rp->offset, count))
 847                         goto out;
 848                 rp->offset += count;
 849                 if (rp->offset >= rq->len) {
 850                         rp->offset = 0;
 851                         spin_lock(&queue_lock);
 852                         list_move(&rp->q.list, &rq->q.list);
 853                         spin_unlock(&queue_lock);
 854                 }
 855                 err = 0;
 856         }
 857  out:
 858         if (rp->offset == 0) {
 859                 /* need to release rq */
 860                 spin_lock(&queue_lock);
 861                 rq->readers--;
 862                 if (rq->readers == 0 &&
 863                     !test_bit(CACHE_PENDING, &rq->item->flags)) {
 864                         list_del(&rq->q.list);
 865                         spin_unlock(&queue_lock);
 866                         cache_put(rq->item, cd);
 867                         kfree(rq->buf);
 868                         kfree(rq);
 869                 } else
 870                         spin_unlock(&queue_lock);
 871         }
 872         if (err == -EAGAIN)
 873                 goto again;
 874         inode_unlock(inode);
 875         return err ? err :  count;
 876 }
 877 
 878 static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 879                                  size_t count, struct cache_detail *cd)
 880 {
 881         ssize_t ret;
 882 
 883         if (count == 0)
 884                 return -EINVAL;
 885         if (copy_from_user(kaddr, buf, count))
 886                 return -EFAULT;
 887         kaddr[count] = '\0';
 888         ret = cd->cache_parse(cd, kaddr, count);
 889         if (!ret)
 890                 ret = count;
 891         return ret;
 892 }
 893 
 894 static ssize_t cache_slow_downcall(const char __user *buf,
 895                                    size_t count, struct cache_detail *cd)
 896 {
 897         static char write_buf[8192]; /* protected by queue_io_mutex */
 898         ssize_t ret = -EINVAL;
 899 
 900         if (count >= sizeof(write_buf))
 901                 goto out;
 902         mutex_lock(&queue_io_mutex);
 903         ret = cache_do_downcall(write_buf, buf, count, cd);
 904         mutex_unlock(&queue_io_mutex);
 905 out:
 906         return ret;
 907 }
 908 
 909 static ssize_t cache_downcall(struct address_space *mapping,
 910                               const char __user *buf,
 911                               size_t count, struct cache_detail *cd)
 912 {
 913         struct page *page;
 914         char *kaddr;
 915         ssize_t ret = -ENOMEM;
 916 
 917         if (count >= PAGE_SIZE)
 918                 goto out_slow;
 919 
 920         page = find_or_create_page(mapping, 0, GFP_KERNEL);
 921         if (!page)
 922                 goto out_slow;
 923 
 924         kaddr = kmap(page);
 925         ret = cache_do_downcall(kaddr, buf, count, cd);
 926         kunmap(page);
 927         unlock_page(page);
 928         put_page(page);
 929         return ret;
 930 out_slow:
 931         return cache_slow_downcall(buf, count, cd);
 932 }
 933 
 934 static ssize_t cache_write(struct file *filp, const char __user *buf,
 935                            size_t count, loff_t *ppos,
 936                            struct cache_detail *cd)
 937 {
 938         struct address_space *mapping = filp->f_mapping;
 939         struct inode *inode = file_inode(filp);
 940         ssize_t ret = -EINVAL;
 941 
 942         if (!cd->cache_parse)
 943                 goto out;
 944 
 945         inode_lock(inode);
 946         ret = cache_downcall(mapping, buf, count, cd);
 947         inode_unlock(inode);
 948 out:
 949         return ret;
 950 }
 951 
 952 static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
 953 
 954 static __poll_t cache_poll(struct file *filp, poll_table *wait,
 955                                struct cache_detail *cd)
 956 {
 957         __poll_t mask;
 958         struct cache_reader *rp = filp->private_data;
 959         struct cache_queue *cq;
 960 
 961         poll_wait(filp, &queue_wait, wait);
 962 
 963         /* always allow write */
 964         mask = EPOLLOUT | EPOLLWRNORM;
 965 
 966         if (!rp)
 967                 return mask;
 968 
 969         spin_lock(&queue_lock);
 970 
 971         for (cq = &rp->q; &cq->list != &cd->queue;
 972              cq = list_entry(cq->list.next, struct cache_queue, list))
 973                 if (!cq->reader) {
 974                         mask |= EPOLLIN | EPOLLRDNORM;
 975                         break;
 976                 }
 977         spin_unlock(&queue_lock);
 978         return mask;
 979 }
 980 
 981 static int cache_ioctl(struct inode *ino, struct file *filp,
 982                        unsigned int cmd, unsigned long arg,
 983                        struct cache_detail *cd)
 984 {
 985         int len = 0;
 986         struct cache_reader *rp = filp->private_data;
 987         struct cache_queue *cq;
 988 
 989         if (cmd != FIONREAD || !rp)
 990                 return -EINVAL;
 991 
 992         spin_lock(&queue_lock);
 993 
 994         /* only find the length remaining in current request,
 995          * or the length of the next request
 996          */
 997         for (cq = &rp->q; &cq->list != &cd->queue;
 998              cq = list_entry(cq->list.next, struct cache_queue, list))
 999                 if (!cq->reader) {
1000                         struct cache_request *cr =
1001                                 container_of(cq, struct cache_request, q);
1002                         len = cr->len - rp->offset;
1003                         break;
1004                 }
1005         spin_unlock(&queue_lock);
1006 
1007         return put_user(len, (int __user *)arg);
1008 }
1009 
1010 static int cache_open(struct inode *inode, struct file *filp,
1011                       struct cache_detail *cd)
1012 {
1013         struct cache_reader *rp = NULL;
1014 
1015         if (!cd || !try_module_get(cd->owner))
1016                 return -EACCES;
1017         nonseekable_open(inode, filp);
1018         if (filp->f_mode & FMODE_READ) {
1019                 rp = kmalloc(sizeof(*rp), GFP_KERNEL);
1020                 if (!rp) {
1021                         module_put(cd->owner);
1022                         return -ENOMEM;
1023                 }
1024                 rp->offset = 0;
1025                 rp->q.reader = 1;
1026 
1027                 spin_lock(&queue_lock);
1028                 list_add(&rp->q.list, &cd->queue);
1029                 spin_unlock(&queue_lock);
1030         }
1031         if (filp->f_mode & FMODE_WRITE)
1032                 atomic_inc(&cd->writers);
1033         filp->private_data = rp;
1034         return 0;
1035 }
1036 
1037 static int cache_release(struct inode *inode, struct file *filp,
1038                          struct cache_detail *cd)
1039 {
1040         struct cache_reader *rp = filp->private_data;
1041 
1042         if (rp) {
1043                 spin_lock(&queue_lock);
1044                 if (rp->offset) {
1045                         struct cache_queue *cq;
1046                         for (cq = &rp->q; &cq->list != &cd->queue;
1047                              cq = list_entry(cq->list.next, struct cache_queue, list))
1048                                 if (!cq->reader) {
1049                                         container_of(cq, struct cache_request, q)
1050                                                 ->readers--;
1051                                         break;
1052                                 }
1053                         rp->offset = 0;
1054                 }
1055                 list_del(&rp->q.list);
1056                 spin_unlock(&queue_lock);
1057 
1058                 filp->private_data = NULL;
1059                 kfree(rp);
1060 
1061         }
1062         if (filp->f_mode & FMODE_WRITE) {
1063                 atomic_dec(&cd->writers);
1064                 cd->last_close = seconds_since_boot();
1065         }
1066         module_put(cd->owner);
1067         return 0;
1068 }
1069 
1070 
1071 
1072 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
1073 {
1074         struct cache_queue *cq, *tmp;
1075         struct cache_request *cr;
1076         struct list_head dequeued;
1077 
1078         INIT_LIST_HEAD(&dequeued);
1079         spin_lock(&queue_lock);
1080         list_for_each_entry_safe(cq, tmp, &detail->queue, list)
1081                 if (!cq->reader) {
1082                         cr = container_of(cq, struct cache_request, q);
1083                         if (cr->item != ch)
1084                                 continue;
1085                         if (test_bit(CACHE_PENDING, &ch->flags))
1086                                 /* Lost a race and it is pending again */
1087                                 break;
1088                         if (cr->readers != 0)
1089                                 continue;
1090                         list_move(&cr->q.list, &dequeued);
1091                 }
1092         spin_unlock(&queue_lock);
1093         while (!list_empty(&dequeued)) {
1094                 cr = list_entry(dequeued.next, struct cache_request, q.list);
1095                 list_del(&cr->q.list);
1096                 cache_put(cr->item, detail);
1097                 kfree(cr->buf);
1098                 kfree(cr);
1099         }
1100 }
1101 
1102 /*
1103  * Support routines for text-based upcalls.
1104  * Fields are separated by spaces.
1105  * Fields are either mangled, quoting space, tab, newline and slosh with slosh,
1106  * or hexified with a leading \x
1107  * Record is terminated with newline.
1108  *
1109  */
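
/*
 * For example (illustrative): the field "a b" is written as "a\040b"
 * by qword_add() below, while a two-byte binary field 0x01 0x23 is
 * written as "\x0123" by qword_addhex().
 */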
1110 
1111 void qword_add(char **bpp, int *lp, char *str)
1112 {
1113         char *bp = *bpp;
1114         int len = *lp;
1115         int ret;
1116 
1117         if (len < 0) return;
1118 
1119         ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
1120         if (ret >= len) {
1121                 bp += len;
1122                 len = -1;
1123         } else {
1124                 bp += ret;
1125                 len -= ret;
1126                 *bp++ = ' ';
1127                 len--;
1128         }
1129         *bpp = bp;
1130         *lp = len;
1131 }
1132 EXPORT_SYMBOL_GPL(qword_add);
1133 
1134 void qword_addhex(char **bpp, int *lp, char *buf, int blen)
1135 {
1136         char *bp = *bpp;
1137         int len = *lp;
1138 
1139         if (len < 0) return;
1140 
1141         if (len > 2) {
1142                 *bp++ = '\\';
1143                 *bp++ = 'x';
1144                 len -= 2;
1145                 while (blen && len >= 2) {
1146                         bp = hex_byte_pack(bp, *buf++);
1147                         len -= 2;
1148                         blen--;
1149                 }
1150         }
1151         if (blen || len < 1) len = -1;
1152         else {
1153                 *bp++ = ' ';
1154                 len--;
1155         }
1156         *bpp = bp;
1157         *lp = len;
1158 }
1159 EXPORT_SYMBOL_GPL(qword_addhex);
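
/*
 * Illustrative sketch (not part of this file): a ->cache_request()
 * callback composing an upcall record with the helpers above.  It is
 * invoked from cache_request() with *blen == PAGE_SIZE; demo_ent is
 * the hypothetical entry type from the sketches earlier in this file.
 */
#if 0
static void demo_request(struct cache_detail *cd, struct cache_head *h,
                         char **bpp, int *blen)
{
        struct demo_ent *ent = container_of(h, struct demo_ent, h);

        qword_add(bpp, blen, ent->name);        /* escaped text field */
        qword_addhex(bpp, blen, (char *)&ent->value, sizeof(ent->value));
        (*bpp)[-1] = '\n';                      /* terminate the record */
}
#endif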
1160 
1161 static void warn_no_listener(struct cache_detail *detail)
1162 {
1163         if (detail->last_warn != detail->last_close) {
1164                 detail->last_warn = detail->last_close;
1165                 if (detail->warn_no_listener)
1166                         detail->warn_no_listener(detail, detail->last_close != 0);
1167         }
1168 }
1169 
1170 static bool cache_listeners_exist(struct cache_detail *detail)
1171 {
1172         if (atomic_read(&detail->writers))
1173                 return true;
1174         if (detail->last_close == 0)
1175                 /* This cache was never opened */
1176                 return false;
1177         if (detail->last_close < seconds_since_boot() - 30)
1178                 /*
1179                  * We allow for the possibility that someone might
1180                  * restart a userspace daemon without restarting the
1181                  * server; but after 30 seconds, we give up.
1182                  */
1183                  return false;
1184         return true;
1185 }
1186 
1187 /*
1188  * register an upcall request to user-space and queue it up for read() by the
1189  * upcall daemon.
1190  *
1191  * Each request is at most one page long.
1192  */
1193 int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
1194 {
1195 
1196         char *buf;
1197         struct cache_request *crq;
1198         int ret = 0;
1199 
1200         if (!detail->cache_request)
1201                 return -EINVAL;
1202 
1203         if (!cache_listeners_exist(detail)) {
1204                 warn_no_listener(detail);
1205                 return -EINVAL;
1206         }
1207         if (test_bit(CACHE_CLEANED, &h->flags))
1208                 /* Too late to make an upcall */
1209                 return -EAGAIN;
1210 
1211         buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1212         if (!buf)
1213                 return -EAGAIN;
1214 
1215         crq = kmalloc(sizeof(*crq), GFP_KERNEL);
1216         if (!crq) {
1217                 kfree(buf);
1218                 return -EAGAIN;
1219         }
1220 
1221         crq->q.reader = 0;
1222         crq->buf = buf;
1223         crq->len = 0;
1224         crq->readers = 0;
1225         spin_lock(&queue_lock);
1226         if (test_bit(CACHE_PENDING, &h->flags)) {
1227                 crq->item = cache_get(h);
1228                 list_add_tail(&crq->q.list, &detail->queue);
1229         } else
1230                 /* Lost a race, no longer PENDING, so don't enqueue */
1231                 ret = -EAGAIN;
1232         spin_unlock(&queue_lock);
1233         wake_up(&queue_wait);
1234         if (ret == -EAGAIN) {
1235                 kfree(buf);
1236                 kfree(crq);
1237         }
1238         return ret;
1239 }
1240 EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
1241 
1242 /*
1243  * parse a message from user-space and pass it
1244  * to an appropriate cache
1245  * Messages are, like requests, separated into fields by
1246  * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
1247  *
1248  * Message is
1249  *   reply cachename expiry key ... content....
1250  *
1251  * key and content are both parsed by cache
1252  */
1253 
1254 int qword_get(char **bpp, char *dest, int bufsize)
1255 {
1256         /* return bytes copied, or -1 on error */
1257         char *bp = *bpp;
1258         int len = 0;
1259 
1260         while (*bp == ' ') bp++;
1261 
1262         if (bp[0] == '\\' && bp[1] == 'x') {
1263                 /* HEX STRING */
1264                 bp += 2;
1265                 while (len < bufsize - 1) {
1266                         int h, l;
1267 
1268                         h = hex_to_bin(bp[0]);
1269                         if (h < 0)
1270                                 break;
1271 
1272                         l = hex_to_bin(bp[1]);
1273                         if (l < 0)
1274                                 break;
1275 
1276                         *dest++ = (h << 4) | l;
1277                         bp += 2;
1278                         len++;
1279                 }
1280         } else {
1281                 /* text with \nnn octal quoting */
1282                 while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
1283                         if (*bp == '\\' &&
1284                             isodigit(bp[1]) && (bp[1] <= '3') &&
1285                             isodigit(bp[2]) &&
1286                             isodigit(bp[3])) {
1287                                 int byte = (*++bp -'0');
1288                                 bp++;
1289                                 byte = (byte << 3) | (*bp++ - '0');
1290                                 byte = (byte << 3) | (*bp++ - '0');
1291                                 *dest++ = byte;
1292                                 len++;
1293                         } else {
1294                                 *dest++ = *bp++;
1295                                 len++;
1296                         }
1297                 }
1298         }
1299 
1300         if (*bp != ' ' && *bp != '\n' && *bp != '\0')
1301                 return -1;
1302         while (*bp == ' ') bp++;
1303         *bpp = bp;
1304         *dest = '\0';
1305         return len;
1306 }
1307 EXPORT_SYMBOL_GPL(qword_get);
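
/*
 * Illustrative sketch (not part of this file): a ->cache_parse()
 * callback consuming a downcall record with qword_get().  Field order
 * and names are hypothetical, and the update step is elided; compare
 * ip_map_parse() for a complete example.
 */
#if 0
static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
{
        char name[64], val[24];
        long value;

        if (mlen == 0 || mesg[mlen - 1] != '\n')
                return -EINVAL;         /* must be one whole record */
        mesg[mlen - 1] = '\0';

        if (qword_get(&mesg, name, sizeof(name)) <= 0)
                return -EINVAL;         /* the key field */
        if (qword_get(&mesg, val, sizeof(val)) <= 0 ||
            kstrtol(val, 10, &value))
                return -EINVAL;         /* the content field */

        /* ... demo_lookup() the entry, then sunrpc_cache_update() ... */
        return 0;
}
#endif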
1308 
1309 
1310 /*
1311  * support /proc/net/rpc/$CACHENAME/content
1312  * as a seqfile.
1313  * We call ->cache_show passing NULL for the item to
1314  * get a header, then pass each real item in the cache
1315  */
1316 
1317 static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
1318 {
1319         loff_t n = *pos;
1320         unsigned int hash, entry;
1321         struct cache_head *ch;
1322         struct cache_detail *cd = m->private;
1323 
1324         if (!n--)
1325                 return SEQ_START_TOKEN;
1326         hash = n >> 32;
1327         entry = n & ((1LL<<32) - 1);
1328 
1329         hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
1330                 if (!entry--)
1331                         return ch;
1332         n &= ~((1LL<<32) - 1);
1333         do {
1334                 hash++;
1335                 n += 1LL<<32;
1336         } while (hash < cd->hash_size &&
1337                 hlist_empty(&cd->hash_table[hash]));
1338         if (hash >= cd->hash_size)
1339                 return NULL;
1340         *pos = n+1;
1341         return hlist_entry_safe(rcu_dereference_raw(
1342                                 hlist_first_rcu(&cd->hash_table[hash])),
1343                                 struct cache_head, cache_list);
1344 }
1345 
1346 static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
1347 {
1348         struct cache_head *ch = p;
1349         int hash = (*pos >> 32);
1350         struct cache_detail *cd = m->private;
1351 
1352         if (p == SEQ_START_TOKEN)
1353                 hash = 0;
1354         else if (ch->cache_list.next == NULL) {
1355                 hash++;
1356                 *pos += 1LL<<32;
1357         } else {
1358                 ++*pos;
1359                 return hlist_entry_safe(rcu_dereference_raw(
1360                                         hlist_next_rcu(&ch->cache_list)),
1361                                         struct cache_head, cache_list);
1362         }
1363         *pos &= ~((1LL<<32) - 1);
1364         while (hash < cd->hash_size &&
1365                hlist_empty(&cd->hash_table[hash])) {
1366                 hash++;
1367                 *pos += 1LL<<32;
1368         }
1369         if (hash >= cd->hash_size)
1370                 return NULL;
1371         ++*pos;
1372         return hlist_entry_safe(rcu_dereference_raw(
1373                                 hlist_first_rcu(&cd->hash_table[hash])),
1374                                 struct cache_head, cache_list);
1375 }
1376 
1377 void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
1378         __acquires(RCU)
1379 {
1380         rcu_read_lock();
1381         return __cache_seq_start(m, pos);
1382 }
1383 EXPORT_SYMBOL_GPL(cache_seq_start_rcu);
1384 
1385 void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
1386 {
1387         return cache_seq_next(file, p, pos);
1388 }
1389 EXPORT_SYMBOL_GPL(cache_seq_next_rcu);
1390 
1391 void cache_seq_stop_rcu(struct seq_file *m, void *p)
1392         __releases(RCU)
1393 {
1394         rcu_read_unlock();
1395 }
1396 EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);
1397 
1398 static int c_show(struct seq_file *m, void *p)
1399 {
1400         struct cache_head *cp = p;
1401         struct cache_detail *cd = m->private;
1402 
1403         if (p == SEQ_START_TOKEN)
1404                 return cd->cache_show(m, cd, NULL);
1405 
1406         ifdebug(CACHE)
1407                 seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
1408                            convert_to_wallclock(cp->expiry_time),
1409                            kref_read(&cp->ref), cp->flags);
1410         cache_get(cp);
1411         if (cache_check(cd, cp, NULL))
1412                 /* cache_check does a cache_put on failure */
1413                 seq_printf(m, "# ");
1414         else {
1415                 if (cache_is_expired(cd, cp))
1416                         seq_printf(m, "# ");
1417                 cache_put(cp, cd);
1418         }
1419 
1420         return cd->cache_show(m, cd, cp);
1421 }
1422 
1423 static const struct seq_operations cache_content_op = {
1424         .start  = cache_seq_start_rcu,
1425         .next   = cache_seq_next_rcu,
1426         .stop   = cache_seq_stop_rcu,
1427         .show   = c_show,
1428 };
1429 
1430 static int content_open(struct inode *inode, struct file *file,
1431                         struct cache_detail *cd)
1432 {
1433         struct seq_file *seq;
1434         int err;
1435 
1436         if (!cd || !try_module_get(cd->owner))
1437                 return -EACCES;
1438 
1439         err = seq_open(file, &cache_content_op);
1440         if (err) {
1441                 module_put(cd->owner);
1442                 return err;
1443         }
1444 
1445         seq = file->private_data;
1446         seq->private = cd;
1447         return 0;
1448 }
1449 
1450 static int content_release(struct inode *inode, struct file *file,
1451                 struct cache_detail *cd)
1452 {
1453         int ret = seq_release(inode, file);
1454         module_put(cd->owner);
1455         return ret;
1456 }
1457 
1458 static int open_flush(struct inode *inode, struct file *file,
1459                         struct cache_detail *cd)
1460 {
1461         if (!cd || !try_module_get(cd->owner))
1462                 return -EACCES;
1463         return nonseekable_open(inode, file);
1464 }
1465 
1466 static int release_flush(struct inode *inode, struct file *file,
1467                         struct cache_detail *cd)
1468 {
1469         module_put(cd->owner);
1470         return 0;
1471 }
1472 
1473 static ssize_t read_flush(struct file *file, char __user *buf,
1474                           size_t count, loff_t *ppos,
1475                           struct cache_detail *cd)
1476 {
1477         char tbuf[22];
1478         size_t len;
1479 
1480         len = snprintf(tbuf, sizeof(tbuf), "%lu\n",
1481                         convert_to_wallclock(cd->flush_time));
1482         return simple_read_from_buffer(buf, count, ppos, tbuf, len);
1483 }
1484 
1485 static ssize_t write_flush(struct file *file, const char __user *buf,
1486                            size_t count, loff_t *ppos,
1487                            struct cache_detail *cd)
1488 {
1489         char tbuf[20];
1490         char *ep;
1491         time_t now;
1492 
1493         if (*ppos || count > sizeof(tbuf)-1)
1494                 return -EINVAL;
1495         if (copy_from_user(tbuf, buf, count))
1496                 return -EFAULT;
1497         tbuf[count] = 0;
1498         simple_strtoul(tbuf, &ep, 0);
1499         if (*ep && *ep != '\n')
1500                 return -EINVAL;
1501         /* Note that while we check that 'buf' holds a valid number,
1502          * we always ignore the value and just flush everything.
1503          * Making use of the number leads to races.
1504          */
1505 
1506         now = seconds_since_boot();
1507         /* Always flush everything, so behave like cache_purge().
1508          * Do this by advancing flush_time to the current time,
1509          * or by one second if it has already reached the current time.
1510          * Newly added cache entries will always have ->last_refresh greater
1511          * than ->flush_time, so they don't get flushed prematurely.
1512          */
1513 
1514         if (cd->flush_time >= now)
1515                 now = cd->flush_time + 1;
1516 
1517         cd->flush_time = now;
1518         cd->nextcheck = now;
1519         cache_flush();
1520 
1521         if (cd->flush)
1522                 cd->flush();
1523 
1524         *ppos += count;
1525         return count;
1526 }
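/* Example (not part of cache.c): the userspace side of the "flush"
 * file, as a sketch; nfs-utils' exportfs does essentially this.  Any
 * syntactically valid number works -- write_flush() checks it and
 * then ignores it, flushing everything.  Reading the same file
 * returns the current flush time as wallclock seconds, per
 * read_flush().  "auth.unix.ip" is one real cache name; substitute
 * the cache you mean to flush.
 */
#include <fcntl.h>
#include <unistd.h>

static int flush_sunrpc_cache(void)
{
        int fd = open("/proc/net/rpc/auth.unix.ip/flush", O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, "1\n", 2) != 2) { /* value parsed, then ignored */
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}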
1527 
1528 static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
1529                                  size_t count, loff_t *ppos)
1530 {
1531         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1532 
1533         return cache_read(filp, buf, count, ppos, cd);
1534 }
1535 
1536 static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
1537                                   size_t count, loff_t *ppos)
1538 {
1539         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1540 
1541         return cache_write(filp, buf, count, ppos, cd);
1542 }
1543 
1544 static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
1545 {
1546         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1547 
1548         return cache_poll(filp, wait, cd);
1549 }
1550 
1551 static long cache_ioctl_procfs(struct file *filp,
1552                                unsigned int cmd, unsigned long arg)
1553 {
1554         struct inode *inode = file_inode(filp);
1555         struct cache_detail *cd = PDE_DATA(inode);
1556 
1557         return cache_ioctl(inode, filp, cmd, arg, cd);
1558 }
1559 
1560 static int cache_open_procfs(struct inode *inode, struct file *filp)
1561 {
1562         struct cache_detail *cd = PDE_DATA(inode);
1563 
1564         return cache_open(inode, filp, cd);
1565 }
1566 
1567 static int cache_release_procfs(struct inode *inode, struct file *filp)
1568 {
1569         struct cache_detail *cd = PDE_DATA(inode);
1570 
1571         return cache_release(inode, filp, cd);
1572 }
1573 
1574 static const struct file_operations cache_file_operations_procfs = {
1575         .owner          = THIS_MODULE,
1576         .llseek         = no_llseek,
1577         .read           = cache_read_procfs,
1578         .write          = cache_write_procfs,
1579         .poll           = cache_poll_procfs,
1580         .unlocked_ioctl = cache_ioctl_procfs, /* for FIONREAD */
1581         .open           = cache_open_procfs,
1582         .release        = cache_release_procfs,
1583 };
1584 
1585 static int content_open_procfs(struct inode *inode, struct file *filp)
1586 {
1587         struct cache_detail *cd = PDE_DATA(inode);
1588 
1589         return content_open(inode, filp, cd);
1590 }
1591 
1592 static int content_release_procfs(struct inode *inode, struct file *filp)
1593 {
1594         struct cache_detail *cd = PDE_DATA(inode);
1595 
1596         return content_release(inode, filp, cd);
1597 }
1598 
1599 static const struct file_operations content_file_operations_procfs = {
1600         .open           = content_open_procfs,
1601         .read           = seq_read,
1602         .llseek         = seq_lseek,
1603         .release        = content_release_procfs,
1604 };
1605 
1606 static int open_flush_procfs(struct inode *inode, struct file *filp)
1607 {
1608         struct cache_detail *cd = PDE_DATA(inode);
1609 
1610         return open_flush(inode, filp, cd);
1611 }
1612 
1613 static int release_flush_procfs(struct inode *inode, struct file *filp)
1614 {
1615         struct cache_detail *cd = PDE_DATA(inode);
1616 
1617         return release_flush(inode, filp, cd);
1618 }
1619 
1620 static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
1621                             size_t count, loff_t *ppos)
1622 {
1623         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1624 
1625         return read_flush(filp, buf, count, ppos, cd);
1626 }
1627 
1628 static ssize_t write_flush_procfs(struct file *filp,
1629                                   const char __user *buf,
1630                                   size_t count, loff_t *ppos)
1631 {
1632         struct cache_detail *cd = PDE_DATA(file_inode(filp));
1633 
1634         return write_flush(filp, buf, count, ppos, cd);
1635 }
1636 
1637 static const struct file_operations cache_flush_operations_procfs = {
1638         .open           = open_flush_procfs,
1639         .read           = read_flush_procfs,
1640         .write          = write_flush_procfs,
1641         .release        = release_flush_procfs,
1642         .llseek         = no_llseek,
1643 };
1644 
1645 static void remove_cache_proc_entries(struct cache_detail *cd)
1646 {
1647         if (cd->procfs) {
1648                 proc_remove(cd->procfs);
1649                 cd->procfs = NULL;
1650         }
1651 }
1652 
1653 #ifdef CONFIG_PROC_FS
1654 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1655 {
1656         struct proc_dir_entry *p;
1657         struct sunrpc_net *sn;
1658 
1659         sn = net_generic(net, sunrpc_net_id);
1660         cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
1661         if (cd->procfs == NULL)
1662                 goto out_nomem;
1663 
1664         p = proc_create_data("flush", S_IFREG | 0600,
1665                              cd->procfs, &cache_flush_operations_procfs, cd);
1666         if (p == NULL)
1667                 goto out_nomem;
1668 
1669         if (cd->cache_request || cd->cache_parse) {
1670                 p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
1671                                      &cache_file_operations_procfs, cd);
1672                 if (p == NULL)
1673                         goto out_nomem;
1674         }
1675         if (cd->cache_show) {
1676                 p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
1677                                      &content_file_operations_procfs, cd);
1678                 if (p == NULL)
1679                         goto out_nomem;
1680         }
1681         return 0;
1682 out_nomem:
1683         remove_cache_proc_entries(cd);
1684         return -ENOMEM;
1685 }
1686 #else /* CONFIG_PROC_FS */
1687 static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1688 {
1689         return 0;
1690 }
1691 #endif
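/* Example (not part of cache.c): the procfs layout the code above
 * produces for a cache named "auth.unix.ip" -- a sketch, with modes
 * taken from create_cache_proc_entries():
 *
 *   /proc/net/rpc/auth.unix.ip/flush    0600  read/write flush time
 *   /proc/net/rpc/auth.unix.ip/channel  0600  only if cache_request
 *                                             or cache_parse is set
 *   /proc/net/rpc/auth.unix.ip/content  0400  only if cache_show is
 *                                             set; dumped via c_show()
 */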
1692 
1693 void __init cache_initialize(void)
1694 {
1695         INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1696 }
1697 
1698 int cache_register_net(struct cache_detail *cd, struct net *net)
1699 {
1700         int ret;
1701 
1702         sunrpc_init_cache_detail(cd);
1703         ret = create_cache_proc_entries(cd, net);
1704         if (ret)
1705                 sunrpc_destroy_cache_detail(cd);
1706         return ret;
1707 }
1708 EXPORT_SYMBOL_GPL(cache_register_net);
1709 
1710 void cache_unregister_net(struct cache_detail *cd, struct net *net)
1711 {
1712         remove_cache_proc_entries(cd);
1713         sunrpc_destroy_cache_detail(cd);
1714 }
1715 EXPORT_SYMBOL_GPL(cache_unregister_net);
1716 
1717 struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
1718 {
1719         struct cache_detail *cd;
1720         int i;
1721 
1722         cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
1723         if (cd == NULL)
1724                 return ERR_PTR(-ENOMEM);
1725 
1726         cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
1727                                  GFP_KERNEL);
1728         if (cd->hash_table == NULL) {
1729                 kfree(cd);
1730                 return ERR_PTR(-ENOMEM);
1731         }
1732 
1733         for (i = 0; i < cd->hash_size; i++)
1734                 INIT_HLIST_HEAD(&cd->hash_table[i]);
1735         cd->net = net;
1736         return cd;
1737 }
1738 EXPORT_SYMBOL_GPL(cache_create_net);
1739 
1740 void cache_destroy_net(struct cache_detail *cd, struct net *net)
1741 {
1742         kfree(cd->hash_table);
1743         kfree(cd);
1744 }
1745 EXPORT_SYMBOL_GPL(cache_destroy_net);
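/* Example (not part of cache.c): the usual per-net lifecycle built
 * from the four helpers above, mirroring what svcauth_unix.c does for
 * its ip_map cache.  A static template is duplicated for each network
 * namespace, registered (creating the procfs entries), and torn down
 * in reverse order.  my_cache_template and the function names are
 * hypothetical.
 */
static int my_cache_init_net(struct net *net)
{
        struct cache_detail *cd;
        int err;

        cd = cache_create_net(&my_cache_template, net);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        err = cache_register_net(cd, net);
        if (err) {
                cache_destroy_net(cd, net);
                return err;
        }
        /* typically stash cd in per-net data here */
        return 0;
}

static void my_cache_exit_net(struct net *net, struct cache_detail *cd)
{
        cache_unregister_net(cd, net);
        cache_destroy_net(cd, net);
}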
1746 
1747 static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
1748                                  size_t count, loff_t *ppos)
1749 {
1750         struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1751 
1752         return cache_read(filp, buf, count, ppos, cd);
1753 }
1754 
1755 static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
1756                                   size_t count, loff_t *ppos)
1757 {
1758         struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1759 
1760         return cache_write(filp, buf, count, ppos, cd);
1761 }
1762 
1763 static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
1764 {
1765         struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1766 
1767         return cache_poll(filp, wait, cd);
1768 }
1769 
1770 static long cache_ioctl_pipefs(struct file *filp,
1771                               unsigned int cmd, unsigned long arg)
1772 {
1773         struct inode *inode = file_inode(filp);
1774         struct cache_detail *cd = RPC_I(inode)->private;
1775 
1776         return cache_ioctl(inode, filp, cmd, arg, cd);
1777 }
1778 
1779 static int cache_open_pipefs(struct inode *inode, struct file *filp)
1780 {
1781         struct cache_detail *cd = RPC_I(inode)->private;
1782 
1783         return cache_open(inode, filp, cd);
1784 }
1785 
1786 static int cache_release_pipefs(struct inode *inode, struct file *filp)
1787 {
1788         struct cache_detail *cd = RPC_I(inode)->private;
1789 
1790         return cache_release(inode, filp, cd);
1791 }
1792 
1793 const struct file_operations cache_file_operations_pipefs = {
1794         .owner          = THIS_MODULE,
1795         .llseek         = no_llseek,
1796         .read           = cache_read_pipefs,
1797         .write          = cache_write_pipefs,
1798         .poll           = cache_poll_pipefs,
1799         .unlocked_ioctl = cache_ioctl_pipefs, /* for FIONREAD */
1800         .open           = cache_open_pipefs,
1801         .release        = cache_release_pipefs,
1802 };
1803 
1804 static int content_open_pipefs(struct inode *inode, struct file *filp)
1805 {
1806         struct cache_detail *cd = RPC_I(inode)->private;
1807 
1808         return content_open(inode, filp, cd);
1809 }
1810 
1811 static int content_release_pipefs(struct inode *inode, struct file *filp)
1812 {
1813         struct cache_detail *cd = RPC_I(inode)->private;
1814 
1815         return content_release(inode, filp, cd);
1816 }
1817 
1818 const struct file_operations content_file_operations_pipefs = {
1819         .open           = content_open_pipefs,
1820         .read           = seq_read,
1821         .llseek         = seq_lseek,
1822         .release        = content_release_pipefs,
1823 };
1824 
1825 static int open_flush_pipefs(struct inode *inode, struct file *filp)
1826 {
1827         struct cache_detail *cd = RPC_I(inode)->private;
1828 
1829         return open_flush(inode, filp, cd);
1830 }
1831 
1832 static int release_flush_pipefs(struct inode *inode, struct file *filp)
1833 {
1834         struct cache_detail *cd = RPC_I(inode)->private;
1835 
1836         return release_flush(inode, filp, cd);
1837 }
1838 
1839 static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
1840                             size_t count, loff_t *ppos)
1841 {
1842         struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1843 
1844         return read_flush(filp, buf, count, ppos, cd);
1845 }
1846 
1847 static ssize_t write_flush_pipefs(struct file *filp,
1848                                   const char __user *buf,
1849                                   size_t count, loff_t *ppos)
1850 {
1851         struct cache_detail *cd = RPC_I(file_inode(filp))->private;
1852 
1853         return write_flush(filp, buf, count, ppos, cd);
1854 }
1855 
1856 const struct file_operations cache_flush_operations_pipefs = {
1857         .open           = open_flush_pipefs,
1858         .read           = read_flush_pipefs,
1859         .write          = write_flush_pipefs,
1860         .release        = release_flush_pipefs,
1861         .llseek         = no_llseek,
1862 };
1863 
1864 int sunrpc_cache_register_pipefs(struct dentry *parent,
1865                                  const char *name, umode_t umode,
1866                                  struct cache_detail *cd)
1867 {
1868         struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
1869         if (IS_ERR(dir))
1870                 return PTR_ERR(dir);
1871         cd->pipefs = dir;
1872         return 0;
1873 }
1874 EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);
1875 
1876 void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
1877 {
1878         if (cd->pipefs) {
1879                 rpc_remove_cache_dir(cd->pipefs);
1880                 cd->pipefs = NULL;
1881         }
1882 }
1883 EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
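/* Example (not part of cache.c): exposing the same cache through
 * rpc_pipefs instead of procfs, roughly as fs/nfs/cache_lib.c does
 * for the DNS-resolution cache.  rpc_create_cache_dir() populates the
 * directory with channel/content/flush files backed by the pipefs
 * fops tables above.  The parent dentry would come from the pipefs
 * mount event; the my_ names are hypothetical.
 */
static int my_cache_pipefs_register(struct dentry *parent,
                                    struct cache_detail *cd)
{
        return sunrpc_cache_register_pipefs(parent, cd->name, 0600, cd);
}

static void my_cache_pipefs_unregister(struct cache_detail *cd)
{
        sunrpc_cache_unregister_pipefs(cd);
}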
1884 
1885 void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
1886 {
1887         spin_lock(&cd->hash_lock);
1888         if (!hlist_unhashed(&h->cache_list)) {
1889                 hlist_del_init_rcu(&h->cache_list);
1890                 cd->entries--;
1891                 set_bit(CACHE_CLEANED, &h->flags);
1892                 spin_unlock(&cd->hash_lock);
1893                 cache_fresh_unlocked(h, cd);
1894                 cache_put(h, cd);
1895         } else
1896                 spin_unlock(&cd->hash_lock);
1897 }
1898 EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);
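/* Example (not part of cache.c): retiring one entry ahead of its
 * expiry with sunrpc_cache_unhash(), as a sketch.  Unhashing drops
 * the hash table's reference and wakes any waiters, so the caller
 * must still hold (and then release) its own reference from lookup.
 * The wrapper name is hypothetical.
 */
static void my_cache_invalidate(struct cache_detail *cd,
                                struct cache_head *h)
{
        sunrpc_cache_unhash(cd, h);     /* drops the table's ref */
        cache_put(h, cd);               /* drop the caller's ref */
}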
