drivers/block/brd.c


DEFINITIONS

This source file includes the following definitions.
  1. brd_lookup_page
  2. brd_insert_page
  3. brd_free_pages
  4. copy_to_brd_setup
  5. copy_to_brd
  6. copy_from_brd
  7. brd_do_bvec
  8. brd_make_request
  9. brd_rw_page
  10. ramdisk_size
  11. brd_alloc
  12. brd_free
  13. brd_init_one
  14. brd_del_one
  15. brd_probe
  16. brd_check_and_reset_par
  17. brd_init
  18. brd_exit

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>

#include <linux/uaccess.h>

#define PAGE_SECTORS_SHIFT      (PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS            (1 << PAGE_SECTORS_SHIFT)

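/*
 * With the common 4 KiB page size and 512-byte sectors, for example,
 * PAGE_SECTORS_SHIFT is 12 - 9 = 3 and PAGE_SECTORS is 8: each backing
 * page holds eight sectors.
 */
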
/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
        int             brd_number;

        struct request_queue    *brd_queue;
        struct gendisk          *brd_disk;
        struct list_head        brd_list;

        /*
         * Backing store of pages and lock to protect it. This is the contents
         * of the block device.
         */
        spinlock_t              brd_lock;
        struct radix_tree_root  brd_pages;
};

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
        pgoff_t idx;
        struct page *page;

        /*
         * The page lifetime is protected by the fact that we have opened the
         * device node -- brd pages will never be deleted under us, so we
         * don't need any further locking or refcounting.
         *
         * This is strictly true for the radix-tree nodes as well (i.e. we
         * don't actually need the rcu_read_lock()), however that is not a
         * documented feature of the radix-tree API so it is better to be
         * safe here (we don't have total exclusion from radix tree updates
         * here, only deletes).
         */
        rcu_read_lock();
        idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
        page = radix_tree_lookup(&brd->brd_pages, idx);
        rcu_read_unlock();

        BUG_ON(page && page->index != idx);

        return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, insert it, and return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
        pgoff_t idx;
        struct page *page;
        gfp_t gfp_flags;

        page = brd_lookup_page(brd, sector);
        if (page)
                return page;

        /*
         * Must use NOIO because we don't want to recurse back into the
         * block or filesystem layers from page reclaim.
         */
        gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
        page = alloc_page(gfp_flags);
        if (!page)
                return NULL;

        if (radix_tree_preload(GFP_NOIO)) {
                __free_page(page);
                return NULL;
        }

        spin_lock(&brd->brd_lock);
        idx = sector >> PAGE_SECTORS_SHIFT;
        page->index = idx;
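        /*
         * radix_tree_insert() fails with -EEXIST if another task inserted a
         * page at this index first; in that case, drop our page and use the
         * one that won the race.
         */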
        if (radix_tree_insert(&brd->brd_pages, idx, page)) {
                __free_page(page);
                page = radix_tree_lookup(&brd->brd_pages, idx);
                BUG_ON(!page);
                BUG_ON(page->index != idx);
        }
        spin_unlock(&brd->brd_lock);

        radix_tree_preload_end();

        return page;
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
        unsigned long pos = 0;
        struct page *pages[FREE_BATCH];
        int nr_pages;

        do {
                int i;

                nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
                                (void **)pages, pos, FREE_BATCH);

                for (i = 0; i < nr_pages; i++) {
                        void *ret;

                        BUG_ON(pages[i]->index < pos);
                        pos = pages[i]->index;
                        ret = radix_tree_delete(&brd->brd_pages, pos);
                        BUG_ON(!ret || ret != pages[i]);
                        __free_page(pages[i]);
                }

                pos++;

                /*
                 * Removing an 80GiB ramdisk takes about 3.4 seconds, so
                 * cond_resched() is needed to avoid stalling the CPU.
                 */
                cond_resched();

                /*
                 * This assumes radix_tree_gang_lookup always returns as
                 * many pages as possible. If the radix-tree code changes,
                 * so will this have to.
                 */
        } while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
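 *
 * Since each segment passed down is at most one page long, a request spans
 * at most two backing pages. For example, with 4 KiB pages, a 1024-byte
 * write starting at sector 7 covers bytes 3584..4095 of one page and bytes
 * 0..511 of the next, so up to two pages are pre-allocated here.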
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        if (!brd_insert_page(brd, sector))
                return -ENOSPC;
        if (copy < n) {
                sector += copy >> SECTOR_SHIFT;
                if (!brd_insert_page(brd, sector))
                        return -ENOSPC;
        }
        return 0;
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
                        sector_t sector, size_t n)
{
        struct page *page;
        void *dst;
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        page = brd_lookup_page(brd, sector);
        BUG_ON(!page);

        dst = kmap_atomic(page);
        memcpy(dst + offset, src, copy);
        kunmap_atomic(dst);

        if (copy < n) {
                src += copy;
                sector += copy >> SECTOR_SHIFT;
                copy = n - copy;
                page = brd_lookup_page(brd, sector);
                BUG_ON(!page);

                dst = kmap_atomic(page);
                memcpy(dst, src, copy);
                kunmap_atomic(dst);
        }
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
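 * Pages that have never been written are holes and read back as zeroes.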
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
                        sector_t sector, size_t n)
{
        struct page *page;
        void *src;
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
        size_t copy;

        copy = min_t(size_t, n, PAGE_SIZE - offset);
        page = brd_lookup_page(brd, sector);
        if (page) {
                src = kmap_atomic(page);
                memcpy(dst, src + offset, copy);
                kunmap_atomic(src);
        } else
                memset(dst, 0, copy);

        if (copy < n) {
                dst += copy;
                sector += copy >> SECTOR_SHIFT;
                copy = n - copy;
                page = brd_lookup_page(brd, sector);
                if (page) {
                        src = kmap_atomic(page);
                        memcpy(dst, src, copy);
                        kunmap_atomic(src);
                } else
                        memset(dst, 0, copy);
        }
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
                        unsigned int len, unsigned int off, unsigned int op,
                        sector_t sector)
{
        void *mem;
        int err = 0;

        if (op_is_write(op)) {
                err = copy_to_brd_setup(brd, sector, len);
                if (err)
                        goto out;
        }

        mem = kmap_atomic(page);
        if (!op_is_write(op)) {
                copy_from_brd(mem + off, brd, sector, len);
                flush_dcache_page(page);
        } else {
                flush_dcache_page(page);
                copy_to_brd(brd, mem + off, sector, len);
        }
        kunmap_atomic(mem);

out:
        return err;
}

static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
        struct brd_device *brd = bio->bi_disk->private_data;
        struct bio_vec bvec;
        sector_t sector;
        struct bvec_iter iter;

        sector = bio->bi_iter.bi_sector;
        if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
                goto io_error;

        bio_for_each_segment(bvec, bio, iter) {
                unsigned int len = bvec.bv_len;
                int err;

                err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
                                  bio_op(bio), sector);
                if (err)
                        goto io_error;
                sector += len >> SECTOR_SHIFT;
        }

        bio_endio(bio);
        return BLK_QC_T_NONE;
io_error:
        bio_io_error(bio);
        return BLK_QC_T_NONE;
}

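/*
 * ->rw_page entry point for synchronous single-page I/O; used, for example,
 * by the swap code via bdev_read_page()/bdev_write_page() to avoid bio
 * allocation.
 */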
static int brd_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, unsigned int op)
{
        struct brd_device *brd = bdev->bd_disk->private_data;
        int err;

        if (PageTransHuge(page))
                return -ENOTSUPP;
        err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
        page_endio(page, op_is_write(op), err);
        return err;
}

static const struct block_device_operations brd_fops = {
        .owner =                THIS_MODULE,
        .rw_page =              brd_rw_page,
};

/*
 * And now the module code and kernel interface.
 */
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, 0444);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, 0444);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Number of minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");
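
/*
 * Example (illustrative values): loading the module with
 *
 *         modprobe brd rd_nr=2 rd_size=1048576 max_part=4
 *
 * creates /dev/ram0 and /dev/ram1 of 1 GiB each, with four minor numbers
 * reserved per disk for partitions.
 */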

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
        rd_size = simple_strtol(str, NULL, 0);
        return 1;
}
__setup("ramdisk_size=", ramdisk_size);
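
/*
 * For example, booting with "ramdisk_size=65536" on the kernel command
 * line makes each RAM disk 64 MiB (the value is in kilobytes).
 */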
#endif

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
        struct brd_device *brd;
        struct gendisk *disk;

        brd = kzalloc(sizeof(*brd), GFP_KERNEL);
        if (!brd)
                goto out;
        brd->brd_number         = i;
        spin_lock_init(&brd->brd_lock);
        INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

        brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
        if (!brd->brd_queue)
                goto out_free_dev;

        blk_queue_make_request(brd->brd_queue, brd_make_request);
        blk_queue_max_hw_sectors(brd->brd_queue, 1024);

        /*
         * Advertise a 4k physical block size so that fdisk aligns
         * partitions on 4k boundaries, as the direct_access API needs 4k
         * alignment and returns a PFN. This only matters on very small
         * devices (<= 4M); otherwise fdisk aligns on 1M anyway. Either
         * way, the call is harmless.
         */
        blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
        disk = brd->brd_disk = alloc_disk(max_part);
        if (!disk)
                goto out_free_queue;
        disk->major             = RAMDISK_MAJOR;
        disk->first_minor       = i * max_part;
        disk->fops              = &brd_fops;
        disk->private_data      = brd;
        disk->flags             = GENHD_FL_EXT_DEVT;
        sprintf(disk->disk_name, "ram%d", i);
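        /* rd_size is in kilobytes; capacity is in 512-byte sectors. */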
        set_capacity(disk, rd_size * 2);
        brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;

        /* Tell the block layer that this is not a rotational device */
        blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);

        return brd;

out_free_queue:
        blk_cleanup_queue(brd->brd_queue);
out_free_dev:
        kfree(brd);
out:
        return NULL;
}

static void brd_free(struct brd_device *brd)
{
        put_disk(brd->brd_disk);
        blk_cleanup_queue(brd->brd_queue);
        brd_free_pages(brd);
        kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
{
        struct brd_device *brd;

        *new = false;
        list_for_each_entry(brd, &brd_devices, brd_list) {
                if (brd->brd_number == i)
                        goto out;
        }

        brd = brd_alloc(i);
        if (brd) {
                brd->brd_disk->queue = brd->brd_queue;
                add_disk(brd->brd_disk);
                list_add_tail(&brd->brd_list, &brd_devices);
        }
        *new = true;
out:
        return brd;
}

static void brd_del_one(struct brd_device *brd)
{
        list_del(&brd->brd_list);
        del_gendisk(brd->brd_disk);
        brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
        struct brd_device *brd;
        struct kobject *kobj;
        bool new;

        mutex_lock(&brd_devices_mutex);
        brd = brd_init_one(MINOR(dev) / max_part, &new);
        kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
        mutex_unlock(&brd_devices_mutex);

        if (new)
                *part = 0;

        return kobj;
}

static inline void brd_check_and_reset_par(void)
{
        if (unlikely(!max_part))
                max_part = 1;

        /*
         * Make sure max_part divides (1U << MINORBITS) exactly; otherwise
         * two partitions could end up with the same dev_t.
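         * For example, max_part=3 does not divide 1U << MINORBITS evenly,
         * so it is rounded up to 1UL << fls(3) = 4 below.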
         */
        if ((1U << MINORBITS) % max_part != 0)
                max_part = 1UL << fls(max_part);

        if (max_part > DISK_MAX_PARTS) {
                pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
                        DISK_MAX_PARTS, DISK_MAX_PARTS);
                max_part = DISK_MAX_PARTS;
        }
}

static int __init brd_init(void)
{
        struct brd_device *brd, *next;
        int i;

        /*
         * The brd module can now instantiate the underlying device
         * structure on demand when its device node is accessed.
         *
         * (1) If rd_nr is specified, create that many devices upfront;
         *     otherwise it defaults to CONFIG_BLK_DEV_RAM_COUNT.
         * (2) Users can create further brd devices by making device nodes
         *     themselves and having the kernel instantiate the actual
         *     device on demand. Example:
         *              mknod /path/devnod_name b 1 X   # 1 is the rd major
         *              fdisk -l /path/devnod_name
         *     If device (X / max_part) was not already created, it is
         *     created dynamically.
         */

        if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
                return -EIO;

        brd_check_and_reset_par();

        for (i = 0; i < rd_nr; i++) {
                brd = brd_alloc(i);
                if (!brd)
                        goto out_free;
                list_add_tail(&brd->brd_list, &brd_devices);
        }

        /* point of no return */

        list_for_each_entry(brd, &brd_devices, brd_list) {
                /*
                 * Associate the queue with the disk just before adding the
                 * disk, to keep the failure path simple.
                 */
                brd->brd_disk->queue = brd->brd_queue;
                add_disk(brd->brd_disk);
        }

        blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
                                  THIS_MODULE, brd_probe, NULL, NULL);

        pr_info("brd: module loaded\n");
        return 0;

out_free:
        list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
                list_del(&brd->brd_list);
                brd_free(brd);
        }
        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

        pr_info("brd: module NOT loaded !!!\n");
        return -ENOMEM;
}

static void __exit brd_exit(void)
{
        struct brd_device *brd, *next;

        list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
                brd_del_one(brd);

        blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

        pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);
