mm/kasan/common.c


DEFINITIONS

This source file includes the following definitions.
  1. in_irqentry_text
  2. filter_irq_stacks
  3. save_stack
  4. set_track
  5. kasan_enable_current
  6. kasan_disable_current
  7. __kasan_check_read
  8. __kasan_check_write
  9. memset
  10. memmove
  11. memcpy
  12. kasan_poison_shadow
  13. kasan_unpoison_shadow
  14. __kasan_unpoison_stack
  15. kasan_unpoison_task_stack
  16. kasan_unpoison_task_stack_below
  17. kasan_unpoison_stack_above_sp_to
  18. kasan_alloc_pages
  19. kasan_free_pages
  20. optimal_redzone
  21. kasan_cache_create
  22. kasan_metadata_size
  23. get_alloc_info
  24. get_free_info
  25. kasan_set_free_info
  26. kasan_poison_slab
  27. kasan_unpoison_object_data
  28. kasan_poison_object_data
  29. assign_tag
  30. kasan_init_slab_obj
  31. shadow_invalid
  32. __kasan_slab_free
  33. kasan_slab_free
  34. __kasan_kmalloc
  35. kasan_slab_alloc
  36. kasan_kmalloc
  37. kasan_kmalloc_large
  38. kasan_krealloc
  39. kasan_poison_kfree
  40. kasan_kfree_large
  41. kasan_module_alloc
  42. kasan_free_shadow
  43. kasan_report
  44. shadow_mapped
  45. kasan_mem_notifier
  46. kasan_memhotplug_init

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  * This file contains common generic and tag-based KASAN code.
   4  *
   5  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
   6  * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
   7  *
   8  * Some code borrowed from https://github.com/xairy/kasan-prototype by
   9  *        Andrey Konovalov <andreyknvl@gmail.com>
  10  *
  11  * This program is free software; you can redistribute it and/or modify
  12  * it under the terms of the GNU General Public License version 2 as
  13  * published by the Free Software Foundation.
  14  *
  15  */
  16 
  17 #include <linux/export.h>
  18 #include <linux/interrupt.h>
  19 #include <linux/init.h>
  20 #include <linux/kasan.h>
  21 #include <linux/kernel.h>
  22 #include <linux/kmemleak.h>
  23 #include <linux/linkage.h>
  24 #include <linux/memblock.h>
  25 #include <linux/memory.h>
  26 #include <linux/mm.h>
  27 #include <linux/module.h>
  28 #include <linux/printk.h>
  29 #include <linux/sched.h>
  30 #include <linux/sched/task_stack.h>
  31 #include <linux/slab.h>
  32 #include <linux/stacktrace.h>
  33 #include <linux/string.h>
  34 #include <linux/types.h>
  35 #include <linux/vmalloc.h>
  36 #include <linux/bug.h>
  37 #include <linux/uaccess.h>
  38 
  39 #include "kasan.h"
  40 #include "../slab.h"
  41 
  42 static inline int in_irqentry_text(unsigned long ptr)
  43 {
  44         return (ptr >= (unsigned long)&__irqentry_text_start &&
  45                 ptr < (unsigned long)&__irqentry_text_end) ||
  46                 (ptr >= (unsigned long)&__softirqentry_text_start &&
  47                  ptr < (unsigned long)&__softirqentry_text_end);
  48 }
  49 
  50 static inline unsigned int filter_irq_stacks(unsigned long *entries,
  51                                              unsigned int nr_entries)
  52 {
  53         unsigned int i;
  54 
  55         for (i = 0; i < nr_entries; i++) {
  56                 if (in_irqentry_text(entries[i])) {
  57                         /* Include the irqentry function into the stack. */
  58                         return i + 1;
  59                 }
  60         }
  61         return nr_entries;
  62 }
  63 
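/*
 * A hypothetical example of the filtering above, with entries[] saved
 * most-recent-first while a softirq handler allocates memory:
 *
 *	save_stack() -> ... -> net_rx_action() -> __do_softirq() ->
 *	<frames of the interrupted context>
 *
 * The first entry that falls into .softirqentry.text is __do_softirq(),
 * so the trace is cut right after it and the unrelated frames of the
 * interrupted context are dropped.
 */
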
  64 static inline depot_stack_handle_t save_stack(gfp_t flags)
  65 {
  66         unsigned long entries[KASAN_STACK_DEPTH];
  67         unsigned int nr_entries;
  68 
  69         nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
  70         nr_entries = filter_irq_stacks(entries, nr_entries);
  71         return stack_depot_save(entries, nr_entries, flags);
  72 }
  73 
  74 static inline void set_track(struct kasan_track *track, gfp_t flags)
  75 {
  76         track->pid = current->pid;
  77         track->stack = save_stack(flags);
  78 }
  79 
  80 void kasan_enable_current(void)
  81 {
  82         current->kasan_depth++;
  83 }
  84 
  85 void kasan_disable_current(void)
  86 {
  87         current->kasan_depth--;
  88 }
  89 
  90 bool __kasan_check_read(const volatile void *p, unsigned int size)
  91 {
  92         return check_memory_region((unsigned long)p, size, false, _RET_IP_);
  93 }
  94 EXPORT_SYMBOL(__kasan_check_read);
  95 
  96 bool __kasan_check_write(const volatile void *p, unsigned int size)
  97 {
  98         return check_memory_region((unsigned long)p, size, true, _RET_IP_);
  99 }
 100 EXPORT_SYMBOL(__kasan_check_write);
 101 
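/*
 * The explicit check helpers above exist for code whose raw memory accesses
 * the compiler does not instrument. A minimal sketch of a hypothetical
 * caller (copy_word() is illustrative, not a kernel function):
 *
 *	static void copy_word(unsigned long *dst, const unsigned long *src)
 *	{
 *		__kasan_check_read(src, sizeof(*src));
 *		__kasan_check_write(dst, sizeof(*dst));
 *		*dst = *src;
 *	}
 */
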
 102 #undef memset
 103 void *memset(void *addr, int c, size_t len)
 104 {
 105         check_memory_region((unsigned long)addr, len, true, _RET_IP_);
 106 
 107         return __memset(addr, c, len);
 108 }
 109 
 110 #undef memmove
 111 void *memmove(void *dest, const void *src, size_t len)
 112 {
 113         check_memory_region((unsigned long)src, len, false, _RET_IP_);
 114         check_memory_region((unsigned long)dest, len, true, _RET_IP_);
 115 
 116         return __memmove(dest, src, len);
 117 }
 118 
 119 #undef memcpy
 120 void *memcpy(void *dest, const void *src, size_t len)
 121 {
 122         check_memory_region((unsigned long)src, len, false, _RET_IP_);
 123         check_memory_region((unsigned long)dest, len, true, _RET_IP_);
 124 
 125         return __memcpy(dest, src, len);
 126 }
 127 
 128 /*
 129  * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 130  * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 131  */
 132 void kasan_poison_shadow(const void *address, size_t size, u8 value)
 133 {
 134         void *shadow_start, *shadow_end;
 135 
 136         /*
 137          * Perform shadow offset calculation based on untagged address, as
 138          * some of the callers (e.g. kasan_poison_object_data) pass tagged
 139          * addresses to this function.
 140          */
 141         address = reset_tag(address);
 142 
 143         shadow_start = kasan_mem_to_shadow(address);
 144         shadow_end = kasan_mem_to_shadow(address + size);
 145 
 146         __memset(shadow_start, value, shadow_end - shadow_start);
 147 }
 148 
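/*
 * A worked example of the mapping above, assuming generic KASAN where
 * KASAN_SHADOW_SCALE_SIZE is 8: kasan_mem_to_shadow() maps every
 * KASAN_SHADOW_SCALE_SIZE-byte granule of memory to one shadow byte
 * (shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET),
 * so poisoning an aligned 256-byte region writes 256 / 8 = 32 shadow
 * bytes with 'value'.
 */
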
 149 void kasan_unpoison_shadow(const void *address, size_t size)
 150 {
 151         u8 tag = get_tag(address);
 152 
 153         /*
 154          * Perform shadow offset calculation based on untagged address, as
 155          * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
 156          * addresses to this function.
 157          */
 158         address = reset_tag(address);
 159 
 160         kasan_poison_shadow(address, size, tag);
 161 
 162         if (size & KASAN_SHADOW_MASK) {
 163                 u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
 164 
 165                 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 166                         *shadow = tag;
 167                 else
 168                         *shadow = size & KASAN_SHADOW_MASK;
 169         }
 170 }
 171 
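/*
 * A worked example of the partial-granule handling above, assuming generic
 * KASAN (KASAN_SHADOW_SCALE_SIZE == 8): kasan_unpoison_shadow(p, 13) on an
 * aligned address p writes 0 (fully accessible) to the shadow byte covering
 * p[0..7] and 13 & KASAN_SHADOW_MASK == 5 to the shadow byte covering
 * p[8..15], meaning only the first 5 bytes of that last granule are
 * accessible; an access to p[13] is therefore still reported. With
 * CONFIG_KASAN_SW_TAGS the last shadow byte receives the pointer tag instead.
 */
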
 172 static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
 173 {
 174         void *base = task_stack_page(task);
 175         size_t size = sp - base;
 176 
 177         kasan_unpoison_shadow(base, size);
 178 }
 179 
 180 /* Unpoison the entire stack for a task. */
 181 void kasan_unpoison_task_stack(struct task_struct *task)
 182 {
 183         __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
 184 }
 185 
 186 /* Unpoison the stack for the current task beyond a watermark sp value. */
 187 asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
 188 {
 189         /*
 190          * Calculate the task stack base address.  Avoid using 'current'
 191          * because this function is called by early resume code which hasn't
 192          * yet set up the percpu register (%gs).
 193          */
 194         void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
 195 
 196         kasan_unpoison_shadow(base, watermark - base);
 197 }
 198 
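/*
 * A short worked example of the masking above (with a hypothetical
 * THREAD_SIZE of 16 KiB): kernel stacks are THREAD_SIZE-aligned, so for
 * watermark == base + 0x1200 the expression watermark & ~(THREAD_SIZE - 1)
 * yields the stack base, and the 0x1200 bytes between the base and the
 * watermark are unpoisoned without ever dereferencing 'current'.
 */
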
 199 /*
 200  * Clear all poison for the region between the current SP and a provided
 201  * watermark value, as is sometimes required prior to hand-crafted asm function
 202  * returns in the middle of functions.
 203  */
 204 void kasan_unpoison_stack_above_sp_to(const void *watermark)
 205 {
 206         const void *sp = __builtin_frame_address(0);
 207         size_t size = watermark - sp;
 208 
 209         if (WARN_ON(sp > watermark))
 210                 return;
 211         kasan_unpoison_shadow(sp, size);
 212 }
 213 
 214 void kasan_alloc_pages(struct page *page, unsigned int order)
 215 {
 216         u8 tag;
 217         unsigned long i;
 218 
 219         if (unlikely(PageHighMem(page)))
 220                 return;
 221 
 222         tag = random_tag();
 223         for (i = 0; i < (1 << order); i++)
 224                 page_kasan_tag_set(page + i, tag);
 225         kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
 226 }
 227 
 228 void kasan_free_pages(struct page *page, unsigned int order)
 229 {
 230         if (likely(!PageHighMem(page)))
 231                 kasan_poison_shadow(page_address(page),
 232                                 PAGE_SIZE << order,
 233                                 KASAN_FREE_PAGE);
 234 }
 235 
 236 /*
 237  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 238  * For larger allocations, larger redzones are used.
 239  */
 240 static inline unsigned int optimal_redzone(unsigned int object_size)
 241 {
 242         if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 243                 return 0;
 244 
 245         return
 246                 object_size <= 64        - 16   ? 16 :
 247                 object_size <= 128       - 32   ? 32 :
 248                 object_size <= 512       - 64   ? 64 :
 249                 object_size <= 4096      - 128  ? 128 :
 250                 object_size <= (1 << 14) - 256  ? 256 :
 251                 object_size <= (1 << 15) - 512  ? 512 :
 252                 object_size <= (1 << 16) - 1024 ? 1024 : 2048;
 253 }
 254 
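/*
 * Worked examples for the table above (generic KASAN): a 40-byte object
 * (<= 64 - 16) gets a 16-byte redzone, a 100-byte object falls through to
 * the 512 - 64 bucket and gets a 64-byte redzone, and a 4000-byte object
 * (> 4096 - 128) gets a 256-byte redzone. With CONFIG_KASAN_SW_TAGS no
 * redzone is added, since out-of-bounds accesses are detected via tag
 * mismatches rather than redzones.
 */
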
 255 void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 256                         slab_flags_t *flags)
 257 {
 258         unsigned int orig_size = *size;
 259         unsigned int redzone_size;
 260         int redzone_adjust;
 261 
 262         /* Add alloc meta. */
 263         cache->kasan_info.alloc_meta_offset = *size;
 264         *size += sizeof(struct kasan_alloc_meta);
 265 
 266         /* Add free meta. */
 267         if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
 268             (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
 269              cache->object_size < sizeof(struct kasan_free_meta))) {
 270                 cache->kasan_info.free_meta_offset = *size;
 271                 *size += sizeof(struct kasan_free_meta);
 272         }
 273 
 274         redzone_size = optimal_redzone(cache->object_size);
 275         redzone_adjust = redzone_size - (*size - cache->object_size);
 276         if (redzone_adjust > 0)
 277                 *size += redzone_adjust;
 278 
 279         *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
 280                         max(*size, cache->object_size + redzone_size));
 281 
 282         /*
 283          * If the metadata doesn't fit, don't enable KASAN at all.
 284          */
 285         if (*size <= cache->kasan_info.alloc_meta_offset ||
 286                         *size <= cache->kasan_info.free_meta_offset) {
 287                 cache->kasan_info.alloc_meta_offset = 0;
 288                 cache->kasan_info.free_meta_offset = 0;
 289                 *size = orig_size;
 290                 return;
 291         }
 292 
 293         *flags |= SLAB_KASAN;
 294 }
 295 
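/*
 * For a hypothetical cache with no constructor and without
 * SLAB_TYPESAFE_BY_RCU under generic KASAN, the code above lays each
 * object out roughly as:
 *
 *	[ object_size bytes of data | struct kasan_alloc_meta | redzone padding ]
 *
 * with the growth clamped so the area past the data is at least
 * optimal_redzone(object_size) bytes and *size never exceeds
 * KMALLOC_MAX_SIZE. Free metadata is only added when the freed object
 * itself cannot be reused to hold it (constructor, SLAB_TYPESAFE_BY_RCU,
 * or object smaller than struct kasan_free_meta).
 */
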
 296 size_t kasan_metadata_size(struct kmem_cache *cache)
 297 {
 298         return (cache->kasan_info.alloc_meta_offset ?
 299                 sizeof(struct kasan_alloc_meta) : 0) +
 300                 (cache->kasan_info.free_meta_offset ?
 301                 sizeof(struct kasan_free_meta) : 0);
 302 }
 303 
 304 struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
 305                                         const void *object)
 306 {
 307         return (void *)object + cache->kasan_info.alloc_meta_offset;
 308 }
 309 
 310 struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
 311                                       const void *object)
 312 {
 313         BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
 314         return (void *)object + cache->kasan_info.free_meta_offset;
 315 }
 316 
 317 
 318 static void kasan_set_free_info(struct kmem_cache *cache,
 319                 void *object, u8 tag)
 320 {
 321         struct kasan_alloc_meta *alloc_meta;
 322         u8 idx = 0;
 323 
 324         alloc_meta = get_alloc_info(cache, object);
 325 
 326 #ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
 327         idx = alloc_meta->free_track_idx;
 328         alloc_meta->free_pointer_tag[idx] = tag;
 329         alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
 330 #endif
 331 
 332         set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
 333 }
 334 
 335 void kasan_poison_slab(struct page *page)
 336 {
 337         unsigned long i;
 338 
 339         for (i = 0; i < compound_nr(page); i++)
 340                 page_kasan_tag_reset(page + i);
 341         kasan_poison_shadow(page_address(page), page_size(page),
 342                         KASAN_KMALLOC_REDZONE);
 343 }
 344 
 345 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
 346 {
 347         kasan_unpoison_shadow(object, cache->object_size);
 348 }
 349 
 350 void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 351 {
 352         kasan_poison_shadow(object,
 353                         round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
 354                         KASAN_KMALLOC_REDZONE);
 355 }
 356 
 357 /*
 358  * This function assigns a tag to an object considering the following:
 359  * 1. A cache might have a constructor, which might save a pointer to a slab
 360  *    object somewhere (e.g. in the object itself). We preassign a tag for
 361  *    each object in caches with constructors during slab creation and reuse
 362  *    the same tag each time a particular object is allocated.
 363  * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 364  *    accessed after being freed. We preassign tags for objects in these
 365  *    caches as well.
 366  * 3. For the SLAB allocator we can't preassign tags randomly since the
 367  *    freelist is stored as an array of indexes instead of a linked list.
 368  *    Assign tags based on object indexes, so that objects that are next
 369  *    to each other get different tags.
 370  */
 371 static u8 assign_tag(struct kmem_cache *cache, const void *object,
 372                         bool init, bool keep_tag)
 373 {
 374         /*
 375          * 1. When an object is kmalloc()'ed, two hooks are called:
 376          *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
 377          *    tag only in the first one.
 378          * 2. We reuse the same tag for krealloc'ed objects.
 379          */
 380         if (keep_tag)
 381                 return get_tag(object);
 382 
 383         /*
 384          * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
 385          * set, assign a tag when the object is being allocated (init == false).
 386          */
 387         if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
 388                 return init ? KASAN_TAG_KERNEL : random_tag();
 389 
 390         /* For caches that have a constructor or SLAB_TYPESAFE_BY_RCU set: */
 391 #ifdef CONFIG_SLAB
 392         /* For SLAB assign tags based on the object index in the freelist. */
 393         return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
 394 #else
 395         /*
 396          * For SLUB assign a random tag during slab creation, otherwise reuse
 397          * the already assigned tag.
 398          */
 399         return init ? random_tag() : get_tag(object);
 400 #endif
 401 }
 402 
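/*
 * Concrete examples of the policy above (CONFIG_KASAN_SW_TAGS): for an
 * ordinary SLUB cache without a constructor, every allocation gets a fresh
 * random_tag(), so a stale pointer kept across kfree()/kmalloc() almost
 * certainly carries a tag that no longer matches the memory. For a SLAB
 * cache with a constructor, the objects at indexes 3 and 4 of a slab get
 * the stable tags 0x03 and 0x04, so neighbouring objects still differ.
 */
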
 403 void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
 404                                                 const void *object)
 405 {
 406         struct kasan_alloc_meta *alloc_info;
 407 
 408         if (!(cache->flags & SLAB_KASAN))
 409                 return (void *)object;
 410 
 411         alloc_info = get_alloc_info(cache, object);
 412         __memset(alloc_info, 0, sizeof(*alloc_info));
 413 
 414         if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 415                 object = set_tag(object,
 416                                 assign_tag(cache, object, true, false));
 417 
 418         return (void *)object;
 419 }
 420 
 421 static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
 422 {
 423         if (IS_ENABLED(CONFIG_KASAN_GENERIC))
 424                 return shadow_byte < 0 ||
 425                         shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
 426 
 427         /* else CONFIG_KASAN_SW_TAGS: */
 428         if ((u8)shadow_byte == KASAN_TAG_INVALID)
 429                 return true;
 430         if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
 431                 return true;
 432 
 433         return false;
 434 }
 435 
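/*
 * Example of what the check above catches: under generic KASAN a freed
 * object's shadow holds a negative marker byte (KASAN_KMALLOC_FREE), so a
 * double kfree() sees an invalid shadow value and triggers an invalid-free
 * report. Under CONFIG_KASAN_SW_TAGS the same happens when the pointer's
 * tag no longer matches the tag stored in shadow memory, e.g. when freeing
 * through a stale pointer after the slot was reallocated with a new tag.
 */
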
 436 static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 437                               unsigned long ip, bool quarantine)
 438 {
 439         s8 shadow_byte;
 440         u8 tag;
 441         void *tagged_object;
 442         unsigned long rounded_up_size;
 443 
 444         tag = get_tag(object);
 445         tagged_object = object;
 446         object = reset_tag(object);
 447 
 448         if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
 449             object)) {
 450                 kasan_report_invalid_free(tagged_object, ip);
 451                 return true;
 452         }
 453 
 454         /* RCU slabs could be legally used after free within the RCU grace period */
 455         if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
 456                 return false;
 457 
 458         shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
 459         if (shadow_invalid(tag, shadow_byte)) {
 460                 kasan_report_invalid_free(tagged_object, ip);
 461                 return true;
 462         }
 463 
 464         rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
 465         kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 466 
 467         if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
 468                         unlikely(!(cache->flags & SLAB_KASAN)))
 469                 return false;
 470 
 471         kasan_set_free_info(cache, object, tag);
 472 
 473         quarantine_put(get_free_info(cache, object), cache);
 474 
 475         return IS_ENABLED(CONFIG_KASAN_GENERIC);
 476 }
 477 
 478 bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 479 {
 480         return __kasan_slab_free(cache, object, ip, true);
 481 }
 482 
 483 static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
 484                                 size_t size, gfp_t flags, bool keep_tag)
 485 {
 486         unsigned long redzone_start;
 487         unsigned long redzone_end;
 488         u8 tag = 0xff;
 489 
 490         if (gfpflags_allow_blocking(flags))
 491                 quarantine_reduce();
 492 
 493         if (unlikely(object == NULL))
 494                 return NULL;
 495 
 496         redzone_start = round_up((unsigned long)(object + size),
 497                                 KASAN_SHADOW_SCALE_SIZE);
 498         redzone_end = round_up((unsigned long)object + cache->object_size,
 499                                 KASAN_SHADOW_SCALE_SIZE);
 500 
 501         if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 502                 tag = assign_tag(cache, object, false, keep_tag);
 503 
 504         /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
 505         kasan_unpoison_shadow(set_tag(object, tag), size);
 506         kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 507                 KASAN_KMALLOC_REDZONE);
 508 
 509         if (cache->flags & SLAB_KASAN)
 510                 set_track(&get_alloc_info(cache, object)->alloc_track, flags);
 511 
 512         return set_tag(object, tag);
 513 }
 514 
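/*
 * A worked example for the rounding above, assuming generic KASAN and a
 * kmalloc(100) request served from a 128-byte cache: redzone_start is
 * round_up(object + 100, 8) == object + 104 and redzone_end is
 * object + 128. Bytes [0, 100) are unpoisoned (the shadow byte for the
 * granule at offset 96 becomes 100 & 7 == 4), while bytes [104, 128) are
 * poisoned with KASAN_KMALLOC_REDZONE, so accesses past the requested
 * size are reported even though they stay inside the slab object.
 */
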
 515 void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
 516                                         gfp_t flags)
 517 {
 518         return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
 519 }
 520 
 521 void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
 522                                 size_t size, gfp_t flags)
 523 {
 524         return __kasan_kmalloc(cache, object, size, flags, true);
 525 }
 526 EXPORT_SYMBOL(kasan_kmalloc);
 527 
 528 void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
 529                                                 gfp_t flags)
 530 {
 531         struct page *page;
 532         unsigned long redzone_start;
 533         unsigned long redzone_end;
 534 
 535         if (gfpflags_allow_blocking(flags))
 536                 quarantine_reduce();
 537 
 538         if (unlikely(ptr == NULL))
 539                 return NULL;
 540 
 541         page = virt_to_page(ptr);
 542         redzone_start = round_up((unsigned long)(ptr + size),
 543                                 KASAN_SHADOW_SCALE_SIZE);
 544         redzone_end = (unsigned long)ptr + page_size(page);
 545 
 546         kasan_unpoison_shadow(ptr, size);
 547         kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
 548                 KASAN_PAGE_REDZONE);
 549 
 550         return (void *)ptr;
 551 }
 552 
 553 void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
 554 {
 555         struct page *page;
 556 
 557         if (unlikely(object == ZERO_SIZE_PTR))
 558                 return (void *)object;
 559 
 560         page = virt_to_head_page(object);
 561 
 562         if (unlikely(!PageSlab(page)))
 563                 return kasan_kmalloc_large(object, size, flags);
 564         else
 565                 return __kasan_kmalloc(page->slab_cache, object, size,
 566                                                 flags, true);
 567 }
 568 
 569 void kasan_poison_kfree(void *ptr, unsigned long ip)
 570 {
 571         struct page *page;
 572 
 573         page = virt_to_head_page(ptr);
 574 
 575         if (unlikely(!PageSlab(page))) {
 576                 if (ptr != page_address(page)) {
 577                         kasan_report_invalid_free(ptr, ip);
 578                         return;
 579                 }
 580                 kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
 581         } else {
 582                 __kasan_slab_free(page->slab_cache, ptr, ip, false);
 583         }
 584 }
 585 
 586 void kasan_kfree_large(void *ptr, unsigned long ip)
 587 {
 588         if (ptr != page_address(virt_to_head_page(ptr)))
 589                 kasan_report_invalid_free(ptr, ip);
 590         /* The object will be poisoned by page_alloc. */
 591 }
 592 
 593 int kasan_module_alloc(void *addr, size_t size)
 594 {
 595         void *ret;
 596         size_t scaled_size;
 597         size_t shadow_size;
 598         unsigned long shadow_start;
 599 
 600         shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
 601         scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
 602         shadow_size = round_up(scaled_size, PAGE_SIZE);
 603 
 604         if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
 605                 return -EINVAL;
 606 
 607         ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
 608                         shadow_start + shadow_size,
 609                         GFP_KERNEL,
 610                         PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
 611                         __builtin_return_address(0));
 612 
 613         if (ret) {
 614                 __memset(ret, KASAN_SHADOW_INIT, shadow_size);
 615                 find_vm_area(addr)->flags |= VM_KASAN;
 616                 kmemleak_ignore(ret);
 617                 return 0;
 618         }
 619 
 620         return -ENOMEM;
 621 }
 622 
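/*
 * Shadow sizing example for the function above, assuming generic KASAN
 * (KASAN_SHADOW_SCALE_SHIFT == 3): for a hypothetical 1 MiB module mapping,
 * scaled_size is (1 MiB + 7) >> 3 == 128 KiB and shadow_size stays 128 KiB
 * after rounding up to PAGE_SIZE. The shadow is vmalloc'ed right at
 * kasan_mem_to_shadow(addr), pre-filled with KASAN_SHADOW_INIT, and later
 * released by kasan_free_shadow() via the VM_KASAN flag set here.
 */
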
 623 void kasan_free_shadow(const struct vm_struct *vm)
 624 {
 625         if (vm->flags & VM_KASAN)
 626                 vfree(kasan_mem_to_shadow(vm->addr));
 627 }
 628 
 629 extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
 630 
 631 void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
 632 {
 633         unsigned long flags = user_access_save();
 634         __kasan_report(addr, size, is_write, ip);
 635         user_access_restore(flags);
 636 }
 637 
 638 #ifdef CONFIG_MEMORY_HOTPLUG
 639 static bool shadow_mapped(unsigned long addr)
 640 {
 641         pgd_t *pgd = pgd_offset_k(addr);
 642         p4d_t *p4d;
 643         pud_t *pud;
 644         pmd_t *pmd;
 645         pte_t *pte;
 646 
 647         if (pgd_none(*pgd))
 648                 return false;
 649         p4d = p4d_offset(pgd, addr);
 650         if (p4d_none(*p4d))
 651                 return false;
 652         pud = pud_offset(p4d, addr);
 653         if (pud_none(*pud))
 654                 return false;
 655 
 656         /*
 657          * We can't use pud_large() or pud_huge(): the former is
 658          * arch-specific and the latter depends on HUGETLB_PAGE. So let's
 659          * abuse pud_bad(); if the pud is bad, it's bad because it's huge.
 660          */
 661         if (pud_bad(*pud))
 662                 return true;
 663         pmd = pmd_offset(pud, addr);
 664         if (pmd_none(*pmd))
 665                 return false;
 666 
 667         if (pmd_bad(*pmd))
 668                 return true;
 669         pte = pte_offset_kernel(pmd, addr);
 670         return !pte_none(*pte);
 671 }
 672 
 673 static int __meminit kasan_mem_notifier(struct notifier_block *nb,
 674                         unsigned long action, void *data)
 675 {
 676         struct memory_notify *mem_data = data;
 677         unsigned long nr_shadow_pages, start_kaddr, shadow_start;
 678         unsigned long shadow_end, shadow_size;
 679 
 680         nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
 681         start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
 682         shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
 683         shadow_size = nr_shadow_pages << PAGE_SHIFT;
 684         shadow_end = shadow_start + shadow_size;
 685 
 686         if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
 687                 WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
 688                 return NOTIFY_BAD;
 689 
 690         switch (action) {
 691         case MEM_GOING_ONLINE: {
 692                 void *ret;
 693 
 694                 /*
 695                  * If the shadow is already mapped, it must have been
 696                  * mapped during boot. This can happen when we are
 697                  * onlining previously offlined memory.
 698                  */
 699                 if (shadow_mapped(shadow_start))
 700                         return NOTIFY_OK;
 701 
 702                 ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
 703                                         shadow_end, GFP_KERNEL,
 704                                         PAGE_KERNEL, VM_NO_GUARD,
 705                                         pfn_to_nid(mem_data->start_pfn),
 706                                         __builtin_return_address(0));
 707                 if (!ret)
 708                         return NOTIFY_BAD;
 709 
 710                 kmemleak_ignore(ret);
 711                 return NOTIFY_OK;
 712         }
 713         case MEM_CANCEL_ONLINE:
 714         case MEM_OFFLINE: {
 715                 struct vm_struct *vm;
 716 
 717                 /*
 718                  * shadow_start was either mapped during boot by kasan_init()
 719                  * or during memory online by __vmalloc_node_range().
 720                  * In the latter case we can use vfree() to free shadow.
 721                  * A non-NULL result from find_vm_area() tells us that
 722                  * it was the latter case.
 723                  *
 724                  * Currently it's not possible to free shadow that was
 725                  * mapped during boot by kasan_init(), because the code
 726                  * to do that hasn't been written yet. So we'll just
 727                  * leak the memory.
 728                  */
 729                 vm = find_vm_area((void *)shadow_start);
 730                 if (vm)
 731                         vfree((void *)shadow_start);
 732         }
 733         }
 734 
 735         return NOTIFY_OK;
 736 }
 737 
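/*
 * A sizing example for the notifier above, assuming generic KASAN and
 * 4 KiB pages: onlining a hypothetical 128 MiB memory block means
 * nr_pages == 32768, nr_shadow_pages == 32768 >> 3 == 4096 and
 * shadow_size == 16 MiB, which is vmalloc'ed on MEM_GOING_ONLINE and
 * vfree'd again on MEM_OFFLINE only if it was not part of the shadow
 * mapped at boot by kasan_init().
 */
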
 738 static int __init kasan_memhotplug_init(void)
 739 {
 740         hotplug_memory_notifier(kasan_mem_notifier, 0);
 741 
 742         return 0;
 743 }
 744 
 745 core_initcall(kasan_memhotplug_init);
 746 #endif
