root/fs/squashfs/cache.c


DEFINITIONS

This source file includes the following definitions:
  1. squashfs_cache_get
  2. squashfs_cache_put
  3. squashfs_cache_delete
  4. squashfs_cache_init
  5. squashfs_copy_data
  6. squashfs_read_metadata
  7. squashfs_get_fragment
  8. squashfs_get_datablock
  9. squashfs_read_table

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * cache.c
 */

/*
 * Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
 * recently accessed data Squashfs uses two small metadata and fragment caches.
 *
 * This file implements a generic cache used for both caches, plus functions
 * layered on top of it to access the metadata and fragment caches.
 *
 * To avoid out of memory and fragmentation issues with vmalloc the cache
 * uses sequences of kmalloced PAGE_SIZE buffers.
 *
 * It should be noted that the cache is not used for file datablocks; these
 * are decompressed and cached in the page-cache in the normal way.  The
 * cache is only used to temporarily cache fragment and metadata blocks
 * which have been read as a result of a metadata (i.e. inode or
 * directory) or fragment access.  Because metadata and fragments are packed
 * together into blocks (to gain greater compression), the read of a
 * particular piece of metadata or fragment will retrieve other
 * metadata/fragments that have been packed with it; because of locality of
 * reference these may be read in the near future.  Temporarily caching them
 * ensures they are available for near-future access without requiring an
 * additional read and decompress.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/pagemap.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "page_actor.h"

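/*
 * Illustrative sketch: each cache entry holds its decompressed block as a
 * sequence of kmalloced PAGE_SIZE buffers rather than one contiguous
 * allocation, so a byte at a given offset lives in
 * entry->data[offset / PAGE_SIZE] at offset % PAGE_SIZE.  The helper below
 * is hypothetical and simply mirrors the arithmetic squashfs_copy_data()
 * uses.
 */
static void *example_entry_address(struct squashfs_cache_entry *entry,
        int offset)
{
        if (offset >= entry->length)
                return NULL;

        /* Select the PAGE_SIZE buffer, then the byte within it */
        return entry->data[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
}
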
/*
 * Look up a block in the cache and increment its usage count.  If not in
 * cache, read and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
        struct squashfs_cache *cache, u64 block, int length)
{
        int i, n;
        struct squashfs_cache_entry *entry;

        spin_lock(&cache->lock);

        while (1) {
                for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {
                        if (cache->entry[i].block == block) {
                                cache->curr_blk = i;
                                break;
                        }
                        i = (i + 1) % cache->entries;
                }

                if (n == cache->entries) {
                        /*
                         * Block not in cache; if all cache entries are used,
                         * go to sleep waiting for one to become available.
                         */
                        if (cache->unused == 0) {
                                cache->num_waiters++;
                                spin_unlock(&cache->lock);
                                wait_event(cache->wait_queue, cache->unused);
                                spin_lock(&cache->lock);
                                cache->num_waiters--;
                                continue;
                        }

                        /*
                         * At least one unused cache entry.  A simple
                         * round-robin strategy is used to choose the entry to
                         * be evicted from the cache.
                         */
                        i = cache->next_blk;
                        for (n = 0; n < cache->entries; n++) {
                                if (cache->entry[i].refcount == 0)
                                        break;
                                i = (i + 1) % cache->entries;
                        }

                        cache->next_blk = (i + 1) % cache->entries;
                        entry = &cache->entry[i];

                        /*
                         * Initialise chosen cache entry, and fill it in from
                         * disk.
                         */
                        cache->unused--;
                        entry->block = block;
                        entry->refcount = 1;
                        entry->pending = 1;
                        entry->num_waiters = 0;
                        entry->error = 0;
                        spin_unlock(&cache->lock);

                        entry->length = squashfs_read_data(sb, block, length,
                                &entry->next_index, entry->actor);

                        spin_lock(&cache->lock);

                        if (entry->length < 0)
                                entry->error = entry->length;

                        entry->pending = 0;

                        /*
                         * While this entry was being filled, one or more
                         * other processes may have looked it up in the cache
                         * and slept waiting for it to become available.
                         */
                        if (entry->num_waiters) {
                                spin_unlock(&cache->lock);
                                wake_up_all(&entry->wait_queue);
                        } else
                                spin_unlock(&cache->lock);

                        goto out;
                }

                /*
                 * Block already in cache.  Increment its refcount so it
                 * doesn't get reused until we're finished with it; if it was
                 * previously unused there's now one less cache entry
                 * available for reuse.
                 */
                entry = &cache->entry[i];
                if (entry->refcount == 0)
                        cache->unused--;
                entry->refcount++;

                /*
                 * If the entry is currently being filled in by another
                 * process, go to sleep waiting for it to become available.
                 */
                if (entry->pending) {
                        entry->num_waiters++;
                        spin_unlock(&cache->lock);
                        wait_event(entry->wait_queue, !entry->pending);
                } else
                        spin_unlock(&cache->lock);

                goto out;
        }

out:
        TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
                cache->name, i, entry->block, entry->refcount, entry->error);

        if (entry->error)
                ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
                                                        block);
        return entry;
}


/*
 * Release a cache entry; once its usage count is zero it can be reused.
 */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
        struct squashfs_cache *cache = entry->cache;

        spin_lock(&cache->lock);
        entry->refcount--;
        if (entry->refcount == 0) {
                cache->unused++;
                /*
                 * If there are any processes waiting for a block to become
                 * available, wake one up.
                 */
                if (cache->num_waiters) {
                        spin_unlock(&cache->lock);
                        wake_up(&cache->wait_queue);
                        return;
                }
        }
        spin_unlock(&cache->lock);
}
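
/*
 * Illustrative sketch of the lookup/release pattern callers of the generic
 * cache follow (squashfs_read_metadata() below is the real in-tree example):
 * squashfs_cache_get() returns a pinned entry whose reference must be
 * dropped with squashfs_cache_put() once the caller has finished with
 * entry->data.  The helper name is hypothetical.
 */
static int example_peek_block_length(struct super_block *sb, u64 block)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        struct squashfs_cache_entry *entry;
        int res;

        /* Look the block up; it is read and decompressed if not cached */
        entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);

        /* entry->error is negative on a failed read, otherwise use the data */
        res = entry->error ? entry->error : entry->length;

        /* Always drop the reference, even on error */
        squashfs_cache_put(entry);
        return res;
}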

/*
 * Delete cache reclaiming all kmalloced buffers.
 */
void squashfs_cache_delete(struct squashfs_cache *cache)
{
        int i, j;

        if (cache == NULL)
                return;

        for (i = 0; i < cache->entries; i++) {
                if (cache->entry[i].data) {
                        for (j = 0; j < cache->pages; j++)
                                kfree(cache->entry[i].data[j]);
                        kfree(cache->entry[i].data);
                }
                kfree(cache->entry[i].actor);
        }

        kfree(cache->entry);
        kfree(cache);
}


/*
 * Initialise cache allocating the specified number of entries, each of
 * size block_size.  To avoid vmalloc fragmentation issues each entry
 * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
 */
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
        int block_size)
{
        int i, j;
        struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);

        if (cache == NULL) {
                ERROR("Failed to allocate %s cache\n", name);
                return NULL;
        }

        cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
        if (cache->entry == NULL) {
                ERROR("Failed to allocate %s cache\n", name);
                goto cleanup;
        }

        cache->curr_blk = 0;
        cache->next_blk = 0;
        cache->unused = entries;
        cache->entries = entries;
        cache->block_size = block_size;
        cache->pages = block_size >> PAGE_SHIFT;
        cache->pages = cache->pages ? cache->pages : 1;
        cache->name = name;
        cache->num_waiters = 0;
        spin_lock_init(&cache->lock);
        init_waitqueue_head(&cache->wait_queue);

        for (i = 0; i < entries; i++) {
                struct squashfs_cache_entry *entry = &cache->entry[i];

                init_waitqueue_head(&cache->entry[i].wait_queue);
                entry->cache = cache;
                entry->block = SQUASHFS_INVALID_BLK;
                entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
                if (entry->data == NULL) {
                        ERROR("Failed to allocate %s cache entry\n", name);
                        goto cleanup;
                }

                for (j = 0; j < cache->pages; j++) {
                        entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        if (entry->data[j] == NULL) {
                                ERROR("Failed to allocate %s buffer\n", name);
                                goto cleanup;
                        }
                }

                entry->actor = squashfs_page_actor_init(entry->data,
                                                cache->pages, 0);
                if (entry->actor == NULL) {
                        ERROR("Failed to allocate %s cache entry\n", name);
                        goto cleanup;
                }
        }

        return cache;

cleanup:
        squashfs_cache_delete(cache);
        return NULL;
}
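
/*
 * Illustrative sketch of the cache lifecycle as driven from mount/unmount
 * code: one squashfs_cache_init() call per cache at mount time, one
 * squashfs_cache_delete() at unmount.  The use of SQUASHFS_CACHED_BLKS and
 * SQUASHFS_METADATA_SIZE here is an assumption about the metadata cache;
 * the helper names are hypothetical.
 */
static struct squashfs_cache *example_create_metadata_cache(void)
{
        /* A fixed number of metadata-sized entries, each split into pages */
        return squashfs_cache_init("metadata", SQUASHFS_CACHED_BLKS,
                SQUASHFS_METADATA_SIZE);
}

static void example_destroy_cache(struct squashfs_cache *cache)
{
        /* Frees every page buffer, actor and entry; safe to call with NULL */
        squashfs_cache_delete(cache);
}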


/*
 * Copy up to length bytes from the cache entry to the buffer, starting at
 * offset bytes into the cache entry.  If there are fewer than length bytes
 * available then copy the number of bytes available.  In all cases return
 * the number of bytes copied.
 */
int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
                int offset, int length)
{
        int remaining = length;

        if (length == 0)
                return 0;
        else if (buffer == NULL)
                return min(length, entry->length - offset);

        while (offset < entry->length) {
                void *buff = entry->data[offset / PAGE_SIZE]
                                + (offset % PAGE_SIZE);
                int bytes = min_t(int, entry->length - offset,
                                PAGE_SIZE - (offset % PAGE_SIZE));

                if (bytes >= remaining) {
                        memcpy(buffer, buff, remaining);
                        remaining = 0;
                        break;
                }

                memcpy(buffer, buff, bytes);
                buffer += bytes;
                remaining -= bytes;
                offset += bytes;
        }

        return length - remaining;
}
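
/*
 * Illustrative sketch: squashfs_copy_data() copes with the data being split
 * across PAGE_SIZE buffers and truncates the copy if fewer than length bytes
 * are available beyond offset; passing a NULL buffer just reports how much
 * could be copied.  The helper name is hypothetical.
 */
static int example_copy_from_entry(struct squashfs_cache_entry *entry,
        void *dest, int offset, int length)
{
        /* How many bytes are actually available from offset onwards? */
        int avail = squashfs_copy_data(NULL, entry, offset, length);

        if (avail < length)
                TRACE("Only %d of %d bytes available\n", avail, length);

        /* Copy them out; the return value is the number of bytes copied */
        return squashfs_copy_data(dest, entry, offset, length);
}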


/*
 * Read length bytes from metadata position <block, offset> (block is the
 * start of the compressed block on disk, and offset is the offset into
 * the block once decompressed).  Data is packed into consecutive blocks,
 * and length bytes may require reading more than one block.
 */
int squashfs_read_metadata(struct super_block *sb, void *buffer,
                u64 *block, int *offset, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;
        int bytes, res = length;
        struct squashfs_cache_entry *entry;

        TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);

        if (unlikely(length < 0))
                return -EIO;

        while (length) {
                entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
                if (entry->error) {
                        res = entry->error;
                        goto error;
                } else if (*offset >= entry->length) {
                        res = -EIO;
                        goto error;
                }

                bytes = squashfs_copy_data(buffer, entry, *offset, length);
                if (buffer)
                        buffer += bytes;
                length -= bytes;
                *offset += bytes;

                if (*offset == entry->length) {
                        *block = entry->next_index;
                        *offset = 0;
                }

                squashfs_cache_put(entry);
        }

        return res;

error:
        squashfs_cache_put(entry);
        return res;
}
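
/*
 * Illustrative sketch: callers treat <*block, *offset> as a cursor into the
 * metadata stream.  Each call copies length bytes and advances the cursor,
 * moving on to the next compressed block when the current one is exhausted,
 * so consecutive structures can be read with back-to-back calls.  The helper
 * name and buffer sizes are hypothetical.
 */
static int example_read_two_items(struct super_block *sb, u64 block, int offset)
{
        char first[32], second[64];
        int err;

        /* Read the first item; block/offset are advanced past it */
        err = squashfs_read_metadata(sb, first, &block, &offset, sizeof(first));
        if (err < 0)
                return err;

        /* The second item follows immediately, possibly in the next block */
        return squashfs_read_metadata(sb, second, &block, &offset,
                sizeof(second));
}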


/*
 * Look up in the fragment cache the fragment located at <start_block> in the
 * filesystem.  If necessary read and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
                                u64 start_block, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;

        return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
                length);
}


/*
 * Read and decompress the datablock located at <start_block> in the
 * filesystem.  The cache is used here to avoid duplicating locking and
 * read/decompress code.
 */
struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
                                u64 start_block, int length)
{
        struct squashfs_sb_info *msblk = sb->s_fs_info;

        return squashfs_cache_get(sb, msblk->read_page, start_block, length);
}


/*
 * Read a filesystem table (uncompressed sequence of bytes) from disk
 */
void *squashfs_read_table(struct super_block *sb, u64 block, int length)
{
        int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int i, res;
        void *table, *buffer, **data;
        struct squashfs_page_actor *actor;

        table = buffer = kmalloc(length, GFP_KERNEL);
        if (table == NULL)
                return ERR_PTR(-ENOMEM);

        data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
        if (data == NULL) {
                res = -ENOMEM;
                goto failed;
        }

        actor = squashfs_page_actor_init(data, pages, length);
        if (actor == NULL) {
                res = -ENOMEM;
                goto failed2;
        }

        for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
                data[i] = buffer;

        res = squashfs_read_data(sb, block, length |
                SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);

        kfree(data);
        kfree(actor);

        if (res < 0)
                goto failed;

        return table;

failed2:
        kfree(data);
failed:
        kfree(table);
        return ERR_PTR(res);
}
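
/*
 * Illustrative sketch: squashfs_read_table() returns a kmalloced buffer on
 * success and an ERR_PTR() value on failure, so callers check the result
 * with IS_ERR()/PTR_ERR() and eventually kfree() the table.  The helper name
 * is hypothetical.
 */
static void *example_load_table(struct super_block *sb, u64 block, int length)
{
        void *table = squashfs_read_table(sb, block, length);

        if (IS_ERR(table))
                ERROR("Failed to read table at 0x%llx\n", block);

        /* On success the caller owns the buffer and must kfree() it */
        return table;
}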
