root/kernel/module.c

DEFINITIONS

This source file includes the following definitions:
  1. __mod_tree_val
  2. __mod_tree_size
  3. mod_tree_less
  4. mod_tree_comp
  5. __mod_tree_insert
  6. __mod_tree_remove
  7. mod_tree_insert
  8. mod_tree_remove_init
  9. mod_tree_remove
  10. mod_find
  11. mod_tree_insert
  12. mod_tree_remove_init
  13. mod_tree_remove
  14. mod_find
  15. __mod_update_bounds
  16. mod_update_bounds
  17. module_assert_mutex
  18. module_assert_mutex_or_preempt
  19. is_module_sig_enforced
  20. set_module_sig_enforced
  21. register_module_notifier
  22. unregister_module_notifier
  23. strong_try_module_get
  24. add_taint_module
  25. __module_put_and_exit
  26. find_sec
  27. section_addr
  28. section_objs
  29. each_symbol_in_section
  30. each_symbol_section
  31. check_exported_symbol
  32. kernel_symbol_value
  33. kernel_symbol_name
  34. kernel_symbol_namespace
  35. cmp_name
  36. find_exported_symbol_in_section
  37. find_symbol
  38. find_module_all
  39. find_module
  40. mod_percpu
  41. percpu_modalloc
  42. percpu_modfree
  43. find_pcpusec
  44. percpu_modcopy
  45. __is_module_percpu_address
  46. is_module_percpu_address
  47. mod_percpu
  48. percpu_modalloc
  49. percpu_modfree
  50. find_pcpusec
  51. percpu_modcopy
  52. is_module_percpu_address
  53. __is_module_percpu_address
  54. module_unload_init
  55. already_uses
  56. add_module_usage
  57. ref_module
  58. module_unload_free
  59. try_force_unload
  60. try_force_unload
  61. try_release_module_ref
  62. try_stop_module
  63. module_refcount
  64. SYSCALL_DEFINE2
  65. print_unload_info
  66. __symbol_put
  67. symbol_put_addr
  68. show_refcnt
  69. __module_get
  70. try_module_get
  71. module_put
  72. print_unload_info
  73. module_unload_free
  74. ref_module
  75. module_unload_init
  76. module_flags_taint
  77. show_initstate
  78. store_uevent
  79. show_coresize
  80. show_initsize
  81. show_taint
  82. try_to_force_load
  83. resolve_rel_crc
  84. check_version
  85. check_modstruct_version
  86. same_magic
  87. check_version
  88. check_modstruct_version
  89. same_magic
  90. verify_namespace_is_imported
  91. resolve_symbol
  92. resolve_symbol_wait
  93. sect_empty
  94. module_sect_show
  95. free_sect_attrs
  96. add_sect_attrs
  97. remove_sect_attrs
  98. module_notes_read
  99. free_notes_attrs
  100. add_notes_attrs
  101. remove_notes_attrs
  102. add_sect_attrs
  103. remove_sect_attrs
  104. add_notes_attrs
  105. remove_notes_attrs
  106. del_usage_links
  107. add_usage_links
  108. module_add_modinfo_attrs
  109. module_remove_modinfo_attrs
  110. mod_kobject_put
  111. mod_sysfs_init
  112. mod_sysfs_setup
  113. mod_sysfs_fini
  114. init_param_lock
  115. mod_sysfs_setup
  116. mod_sysfs_fini
  117. module_remove_modinfo_attrs
  118. del_usage_links
  119. init_param_lock
  120. mod_sysfs_teardown
  121. frob_text
  122. frob_rodata
  123. frob_ro_after_init
  124. frob_writable_data
  125. module_disable_ro
  126. module_enable_ro
  127. module_enable_nx
  128. set_all_modules_text_rw
  129. set_all_modules_text_ro
  130. module_enable_nx
  131. module_enable_x
  132. module_enable_nx
  133. module_enable_x
  134. copy_module_elf
  135. free_module_elf
  136. copy_module_elf
  137. free_module_elf
  138. module_memfree
  139. module_arch_cleanup
  140. module_arch_freeing_init
  141. free_module
  142. __symbol_get
  143. verify_exported_symbols
  144. simplify_symbols
  145. apply_relocations
  146. arch_mod_section_prepend
  147. get_offset
  148. layout_sections
  149. set_license
  150. next_string
  151. get_next_modinfo
  152. get_modinfo
  153. setup_modinfo
  154. free_modinfo
  155. lookup_exported_symbol
  156. is_exported
  157. elf_type
  158. is_core_symbol
  159. layout_symtab
  160. add_kallsyms
  161. layout_symtab
  162. add_kallsyms
  163. dynamic_debug_setup
  164. dynamic_debug_remove
  165. module_alloc
  166. module_exit_section
  167. kmemleak_load_module
  168. kmemleak_load_module
  169. module_sig_check
  170. module_sig_check
  171. elf_header_check
  172. copy_chunked_from_user
  173. check_modinfo_livepatch
  174. check_modinfo_livepatch
  175. check_modinfo_retpoline
  176. copy_module_from_user
  177. free_copy
  178. rewrite_section_headers
  179. setup_load_info
  180. check_modinfo
  181. find_module_sections
  182. move_module
  183. check_module_license_and_versions
  184. flush_module_icache
  185. module_frob_arch_sections
  186. blacklisted
  187. layout_and_allocate
  188. module_deallocate
  189. module_finalize
  190. post_relocation
  191. finished_loading
  192. do_mod_ctors
  193. do_free_init
  194. modules_wq_init
  195. do_init_module
  196. may_init_module
  197. add_unformed_module
  198. complete_formation
  199. prepare_coming_module
  200. unknown_module_param_cb
  201. load_module
  202. SYSCALL_DEFINE3
  203. SYSCALL_DEFINE3
  204. within
  205. is_arm_mapping_symbol
  206. kallsyms_symbol_name
  207. find_kallsyms_symbol
  208. dereference_module_function_descriptor
  209. module_address_lookup
  210. lookup_module_symbol_name
  211. lookup_module_symbol_attrs
  212. module_get_kallsym
  213. find_kallsyms_symbol_value
  214. module_kallsyms_lookup_name
  215. module_kallsyms_on_each_symbol
  216. module_flags
  217. m_start
  218. m_next
  219. m_stop
  220. m_show
  221. modules_open
  222. proc_modules_init
  223. search_module_extables
  224. is_module_address
  225. __module_address
  226. is_module_text_address
  227. __module_text_address
  228. print_modules
  229. module_layout

// SPDX-License-Identifier: GPL-2.0-or-later
/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

*/
#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/module_signature.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
 */
#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * module_mutex protects:
 * 1) the list of modules (also safely readable with preempt disabled),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (Deletion and addition use RCU list operations.)
 */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);

/* Work queue for freeing init sections in success case */
static struct work_struct init_free_wq;
static struct llist_head init_free_list;

#ifdef CONFIG_MODULES_TREE_LOOKUP

/*
 * Use a latched RB-tree for __module_address(); this allows us to use
 * RCU-sched lookups of the address from any context.
 *
 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 * __module_address() hard by doing a lot of stack unwinding; potentially from
 * NMI context.
 */

static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
        struct module_layout *layout = container_of(n, struct module_layout, mtn.node);

        return (unsigned long)layout->base;
}

static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
        struct module_layout *layout = container_of(n, struct module_layout, mtn.node);

        return (unsigned long)layout->size;
}

static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
        return __mod_tree_val(a) < __mod_tree_val(b);
}

static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
        unsigned long val = (unsigned long)key;
        unsigned long start, end;

        start = __mod_tree_val(n);
        if (val < start)
                return -1;

        end = start + __mod_tree_size(n);
        if (val >= end)
                return 1;

        return 0;
}

static const struct latch_tree_ops mod_tree_ops = {
        .less = mod_tree_less,
        .comp = mod_tree_comp,
};

static struct mod_tree_root {
        struct latch_tree_root root;
        unsigned long addr_min;
        unsigned long addr_max;
} mod_tree __cacheline_aligned = {
        .addr_min = -1UL,
};

#define module_addr_min mod_tree.addr_min
#define module_addr_max mod_tree.addr_max

static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
        latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node)
{
        latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}

/*
 * The insert, remove_init and remove modifications are serialized by
 * module_mutex.
 */
static void mod_tree_insert(struct module *mod)
{
        mod->core_layout.mtn.mod = mod;
        mod->init_layout.mtn.mod = mod;

        __mod_tree_insert(&mod->core_layout.mtn);
        if (mod->init_layout.size)
                __mod_tree_insert(&mod->init_layout.mtn);
}

static void mod_tree_remove_init(struct module *mod)
{
        if (mod->init_layout.size)
                __mod_tree_remove(&mod->init_layout.mtn);
}

static void mod_tree_remove(struct module *mod)
{
        __mod_tree_remove(&mod->core_layout.mtn);
        mod_tree_remove_init(mod);
}

static struct module *mod_find(unsigned long addr)
{
        struct latch_tree_node *ltn;

        ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
        if (!ltn)
                return NULL;

        return container_of(ltn, struct mod_tree_node, node)->mod;
}
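
/*
 * Illustrative sketch (not part of the original file): lookups run with
 * preemption disabled (RCU-sched), which is what lets the latched RB-tree
 * be read safely from any context, e.g.:
 *
 *        preempt_disable();
 *        mod = mod_find(addr);
 *        ...
 *        preempt_enable();
 */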

#else /* MODULES_TREE_LOOKUP */

static unsigned long module_addr_min = -1UL, module_addr_max = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }

static struct module *mod_find(unsigned long addr)
{
        struct module *mod;

        list_for_each_entry_rcu(mod, &modules, list,
                                lockdep_is_held(&module_mutex)) {
                if (within_module(addr, mod))
                        return mod;
        }

        return NULL;
}

#endif /* MODULES_TREE_LOOKUP */

/*
 * Bounds of module memory, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(void *base, unsigned int size)
{
        unsigned long min = (unsigned long)base;
        unsigned long max = min + size;

        if (min < module_addr_min)
                module_addr_min = min;
        if (max > module_addr_max)
                module_addr_max = max;
}

static void mod_update_bounds(struct module *mod)
{
        __mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
        if (mod->init_layout.size)
                __mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
}

#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

static void module_assert_mutex(void)
{
        lockdep_assert_held(&module_mutex);
}

static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
        if (unlikely(!debug_locks))
                return;

        WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
                !lockdep_is_held(&module_mutex));
#endif
}

static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);

/*
 * Export the sig_enforce kernel cmdline parameter so that other
 * subsystems can rely on it instead of checking CONFIG_MODULE_SIG_FORCE
 * directly.
 */
bool is_module_sig_enforced(void)
{
        return sig_enforce;
}
EXPORT_SYMBOL(is_module_sig_enforced);

void set_module_sig_enforced(void)
{
        sig_enforce = true;
}

/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{
        BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
        if (mod && mod->state == MODULE_STATE_COMING)
                return -EBUSY;
        if (try_module_get(mod))
                return 0;
        else
                return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
                                    enum lockdep_ok lockdep_ok)
{
        add_taint(flag, lockdep_ok);
        set_bit(flag, &mod->taints);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __noreturn __module_put_and_exit(struct module *mod, long code)
{
        module_put(mod);
        do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
        unsigned int i;

        for (i = 1; i < info->hdr->e_shnum; i++) {
                Elf_Shdr *shdr = &info->sechdrs[i];
                /* Alloc bit cleared means "ignore it." */
                if ((shdr->sh_flags & SHF_ALLOC)
                    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
                        return i;
        }
        return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
        /* Section 0 has sh_addr 0. */
        return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
                          const char *name,
                          size_t object_size,
                          unsigned int *num)
{
        unsigned int sec = find_sec(info, name);

        /* Section 0 has sh_addr 0 and sh_size 0. */
        *num = info->sechdrs[sec].sh_size / object_size;
        return (void *)info->sechdrs[sec].sh_addr;
}
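
/*
 * Example use (see find_module_sections() later in this file):
 *
 *        mod->kp = section_objs(info, "__param",
 *                               sizeof(*mod->kp), &mod->num_kp);
 *
 * A missing section degrades gracefully: find_sec() returns 0, and
 * section 0 yields a NULL address and a count of zero.
 */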

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const s32 __start___kcrctab[];
extern const s32 __start___kcrctab_gpl[];
extern const s32 __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const s32 __start___kcrctab_unused[];
extern const s32 __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static bool each_symbol_in_section(const struct symsearch *arr,
                                   unsigned int arrsize,
                                   struct module *owner,
                                   bool (*fn)(const struct symsearch *syms,
                                              struct module *owner,
                                              void *data),
                                   void *data)
{
        unsigned int j;

        for (j = 0; j < arrsize; j++) {
                if (fn(&arr[j], owner, data))
                        return true;
        }

        return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
                                    struct module *owner,
                                    void *data),
                         void *data)
{
        struct module *mod;
        static const struct symsearch arr[] = {
                { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
                  NOT_GPL_ONLY, false },
                { __start___ksymtab_gpl, __stop___ksymtab_gpl,
                  __start___kcrctab_gpl,
                  GPL_ONLY, false },
                { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
                  __start___kcrctab_gpl_future,
                  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
                { __start___ksymtab_unused, __stop___ksymtab_unused,
                  __start___kcrctab_unused,
                  NOT_GPL_ONLY, true },
                { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
                  __start___kcrctab_unused_gpl,
                  GPL_ONLY, true },
#endif
        };

        module_assert_mutex_or_preempt();

        if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
                return true;

        list_for_each_entry_rcu(mod, &modules, list,
                                lockdep_is_held(&module_mutex)) {
                struct symsearch arr[] = {
                        { mod->syms, mod->syms + mod->num_syms, mod->crcs,
                          NOT_GPL_ONLY, false },
                        { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
                          mod->gpl_crcs,
                          GPL_ONLY, false },
                        { mod->gpl_future_syms,
                          mod->gpl_future_syms + mod->num_gpl_future_syms,
                          mod->gpl_future_crcs,
                          WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
                        { mod->unused_syms,
                          mod->unused_syms + mod->num_unused_syms,
                          mod->unused_crcs,
                          NOT_GPL_ONLY, true },
                        { mod->unused_gpl_syms,
                          mod->unused_gpl_syms + mod->num_unused_gpl_syms,
                          mod->unused_gpl_crcs,
                          GPL_ONLY, true },
#endif
                };

                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;

                if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
                        return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);

struct find_symbol_arg {
        /* Input */
        const char *name;
        bool gplok;
        bool warn;

        /* Output */
        struct module *owner;
        const s32 *crc;
        const struct kernel_symbol *sym;
};

static bool check_exported_symbol(const struct symsearch *syms,
                                  struct module *owner,
                                  unsigned int symnum, void *data)
{
        struct find_symbol_arg *fsa = data;

        if (!fsa->gplok) {
                if (syms->licence == GPL_ONLY)
                        return false;
                if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
                        pr_warn("Symbol %s is being used by a non-GPL module, "
                                "which will not be allowed in the future\n",
                                fsa->name);
                }
        }

#ifdef CONFIG_UNUSED_SYMBOLS
        if (syms->unused && fsa->warn) {
                pr_warn("Symbol %s is marked as UNUSED, however this module is "
                        "using it.\n", fsa->name);
                pr_warn("This symbol will go away in the future.\n");
                pr_warn("Please evaluate if this is the right api to use and "
                        "if it really is, submit a report to the linux kernel "
                        "mailing list together with submitting your code for "
                        "inclusion.\n");
        }
#endif

        fsa->owner = owner;
        fsa->crc = symversion(syms->crcs, symnum);
        fsa->sym = &syms->start[symnum];
        return true;
}

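/*
 * With CONFIG_HAVE_ARCH_PREL32_RELOCATIONS, each kernel_symbol entry
 * stores 32-bit offsets relative to itself instead of absolute pointers;
 * offset_to_ptr() converts such an offset back into a pointer.  This
 * shrinks the export tables and avoids load-time relocations for them.
 * The accessors below hide the difference from the rest of this file.
 */
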
static unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
        return (unsigned long)offset_to_ptr(&sym->value_offset);
#else
        return sym->value;
#endif
}

static const char *kernel_symbol_name(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
        return offset_to_ptr(&sym->name_offset);
#else
        return sym->name;
#endif
}

static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
        if (!sym->namespace_offset)
                return NULL;
        return offset_to_ptr(&sym->namespace_offset);
#else
        return sym->namespace;
#endif
}

static int cmp_name(const void *name, const void *sym)
{
        return strcmp(name, kernel_symbol_name(sym));
}

static bool find_exported_symbol_in_section(const struct symsearch *syms,
                                            struct module *owner,
                                            void *data)
{
        struct find_symbol_arg *fsa = data;
        struct kernel_symbol *sym;

        sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
                        sizeof(struct kernel_symbol), cmp_name);

        if (sym != NULL && check_exported_symbol(syms, owner,
                                                 sym - syms->start, data))
                return true;

        return false;
}

/*
 * Find an exported symbol and return it, along with (optionally) its crc
 * and the module which owns it.  Needs preempt disabled or module_mutex.
 */
const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
                                        const s32 **crc,
                                        bool gplok,
                                        bool warn)
{
        struct find_symbol_arg fsa;

        fsa.name = name;
        fsa.gplok = gplok;
        fsa.warn = warn;

        if (each_symbol_section(find_exported_symbol_in_section, &fsa)) {
                if (owner)
                        *owner = fsa.owner;
                if (crc)
                        *crc = fsa.crc;
                return fsa.sym;
        }

        pr_debug("Failed to find symbol %s\n", name);
        return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
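
/*
 * Illustrative sketch (hypothetical caller): a lookup must block module
 * removal, so it runs with preemption disabled or module_mutex held:
 *
 *        preempt_disable();
 *        sym = find_symbol("printk", &owner, NULL, true, true);
 *        if (sym)
 *                addr = kernel_symbol_value(sym);
 *        preempt_enable();
 */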

/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
static struct module *find_module_all(const char *name, size_t len,
                                      bool even_unformed)
{
        struct module *mod;

        module_assert_mutex_or_preempt();

        list_for_each_entry_rcu(mod, &modules, list,
                                lockdep_is_held(&module_mutex)) {
                if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
                        continue;
                if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
                        return mod;
        }
        return NULL;
}

struct module *find_module(const char *name)
{
        module_assert_mutex();
        return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
        return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
        Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
        unsigned long align = pcpusec->sh_addralign;

        if (!pcpusec->sh_size)
                return 0;

        if (align > PAGE_SIZE) {
                pr_warn("%s: per-cpu alignment %li > %li\n",
                        mod->name, align, PAGE_SIZE);
                align = PAGE_SIZE;
        }

        mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
        if (!mod->percpu) {
                pr_warn("%s: Could not allocate %lu bytes percpu data\n",
                        mod->name, (unsigned long)pcpusec->sh_size);
                return -ENOMEM;
        }
        mod->percpu_size = pcpusec->sh_size;
        return 0;
}
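
/*
 * Note: __alloc_reserved_percpu() carves the module's static per-cpu area
 * out of the first chunk's reserved region (sized by PERCPU_MODULE_RESERVE),
 * so module per-cpu variables get the same addressing scheme as built-in
 * ones.
 */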

static void percpu_modfree(struct module *mod)
{
        free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
        return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
                           const void *from, unsigned long size)
{
        int cpu;

        for_each_possible_cpu(cpu)
                memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

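/*
 * Test whether @addr falls inside some module's static per-cpu area.  If
 * it does and @can_addr is non-NULL, *can_addr is set to the canonical
 * address: the offset within the area rebased onto the boot CPU's copy.
 */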
bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
        struct module *mod;
        unsigned int cpu;

        preempt_disable();

        list_for_each_entry_rcu(mod, &modules, list) {
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
                if (!mod->percpu_size)
                        continue;
                for_each_possible_cpu(cpu) {
                        void *start = per_cpu_ptr(mod->percpu, cpu);
                        void *va = (void *)addr;

                        if (va >= start && va < start + mod->percpu_size) {
                                if (can_addr) {
                                        *can_addr = (unsigned long) (va - start);
                                        *can_addr += (unsigned long)
                                                per_cpu_ptr(mod->percpu,
                                                            get_boot_cpu_id());
                                }
                                preempt_enable();
                                return true;
                        }
                }
        }

        preempt_enable();
        return false;
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
        return __is_module_percpu_address(addr, NULL);
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
        return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
        /* UP modules shouldn't have this section: ENOMEM isn't quite right */
        if (info->sechdrs[info->index.pcpu].sh_size != 0)
                return -ENOMEM;
        return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
        return 0;
}
static inline void percpu_modcopy(struct module *mod,
                                  const void *from, unsigned long size)
{
        /* pcpusec should be 0, and size of that section should be 0. */
        BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
        return false;
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
        return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)     \
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
        mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
                        struct module_kobject *mk, char *buffer)      \
{                                                                     \
        return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
        return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
        kfree(mod->field);                                            \
        mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
        .attr = { .name = __stringify(field), .mode = 0444 },         \
        .show = show_modinfo_##field,                                 \
        .setup = setup_modinfo_##field,                               \
        .test = modinfo_##field##_exists,                             \
        .free = free_modinfo_##field,                                 \
};
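
/*
 * Each MODINFO_ATTR(field) expansion generates the setup/show/test/free
 * quartet for one modinfo string; e.g. MODINFO_ATTR(version) backs
 * /sys/module/<name>/version.  The attributes are wired up through the
 * modinfo_attrs[] table further down.
 */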

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count held by the module loader. */
#define MODULE_REF_BASE 1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
        /*
         * Initialize reference counter to MODULE_REF_BASE.
         * refcnt == 0 means module is going.
         */
        atomic_set(&mod->refcnt, MODULE_REF_BASE);

        INIT_LIST_HEAD(&mod->source_list);
        INIT_LIST_HEAD(&mod->target_list);

        /* Hold reference count during initialization. */
        atomic_inc(&mod->refcnt);

        return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
        struct module_use *use;

        list_for_each_entry(use, &b->source_list, source_list) {
                if (use->source == a) {
                        pr_debug("%s uses %s!\n", a->name, b->name);
                        return 1;
                }
        }
        pr_debug("%s does not use %s!\n", a->name, b->name);
        return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
        struct module_use *use;

        pr_debug("Allocating new usage for %s.\n", a->name);
        use = kmalloc(sizeof(*use), GFP_ATOMIC);
        if (!use)
                return -ENOMEM;

        use->source = a;
        use->target = b;
        list_add(&use->source_list, &b->source_list);
        list_add(&use->target_list, &a->target_list);
        return 0;
}

/* Module a uses b: caller must hold module_mutex. */
int ref_module(struct module *a, struct module *b)
{
        int err;

        if (b == NULL || already_uses(a, b))
                return 0;

        /* If module isn't available, we fail. */
        err = strong_try_module_get(b);
        if (err)
                return err;

        err = add_module_usage(a, b);
        if (err) {
                module_put(b);
                return err;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ref_module);
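
/*
 * Illustrative example: if "ext4" resolves a symbol exported by "mbcache",
 * symbol resolution ends up calling ref_module(ext4, mbcache), which pins
 * mbcache for as long as ext4 is loaded and records the dependency shown
 * by /proc/modules and lsmod.
 */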

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
        struct module_use *use, *tmp;

        mutex_lock(&module_mutex);
        list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
                struct module *i = use->target;
                pr_debug("%s unusing %s\n", mod->name, i->name);
                module_put(i);
                list_del(&use->source_list);
                list_del(&use->target_list);
                kfree(use);
        }
        mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
        int ret = (flags & O_TRUNC);
        if (ret)
                add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
        return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
        return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
        int ret;

        /* Try to decrement refcnt which we set at loading */
        ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
        BUG_ON(ret < 0);
        if (ret)
                /* Others may drop their refs concurrently; re-add the
                 * base ref only if the count is still non-zero. */
                ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

        return ret;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
        /* If it's not unused, quit unless we're forcing. */
        if (try_release_module_ref(mod) != 0) {
                *forced = try_force_unload(flags);
                if (!(*forced))
                        return -EWOULDBLOCK;
        }

        /* Mark it as dying. */
        mod->state = MODULE_STATE_GOING;

        return 0;
}

/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod:        the module we're checking
 *
 * Returns:
 *      -1 if the module is in the process of unloading
 *      otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
        return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

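/*
 * delete_module(2): remove the named module.  With
 * CONFIG_MODULE_FORCE_UNLOAD, passing O_TRUNC in @flags forces out a busy
 * or permanent module and taints the kernel (see try_force_unload()
 * above); otherwise a module that is still used, still initializing, or
 * lacking an exit handler cannot be removed.
 */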
SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
                unsigned int, flags)
{
        struct module *mod;
        char name[MODULE_NAME_LEN];
        int ret, forced = 0;

        if (!capable(CAP_SYS_MODULE) || modules_disabled)
                return -EPERM;

        if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
                return -EFAULT;
        name[MODULE_NAME_LEN-1] = '\0';

        audit_log_kern_module(name);

        if (mutex_lock_interruptible(&module_mutex) != 0)
                return -EINTR;

        mod = find_module(name);
        if (!mod) {
                ret = -ENOENT;
                goto out;
        }

        if (!list_empty(&mod->source_list)) {
                /* Other modules depend on us: get rid of them first. */
                ret = -EWOULDBLOCK;
                goto out;
        }

        /* Doing init or already dying? */
        if (mod->state != MODULE_STATE_LIVE) {
                /* FIXME: if (force), slam module count damn the torpedoes */
                pr_debug("%s already dying\n", mod->name);
                ret = -EBUSY;
                goto out;
        }

        /* If it has an init func, it must have an exit func to unload */
        if (mod->init && !mod->exit) {
                forced = try_force_unload(flags);
                if (!forced) {
                        /* This module can't be removed */
                        ret = -EBUSY;
                        goto out;
                }
        }

        /* Stop the machine so refcounts can't move and disable module. */
        ret = try_stop_module(mod, flags, &forced);
        if (ret != 0)
                goto out;

        mutex_unlock(&module_mutex);
        /* Final destruction now no one is using it. */
        if (mod->exit != NULL)
                mod->exit();
        blocking_notifier_call_chain(&module_notify_list,
                                     MODULE_STATE_GOING, mod);
        klp_module_going(mod);
        ftrace_release_mod(mod);

        async_synchronize_full();

        /* Store the name of the last unloaded module for diagnostic purposes */
        strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

        free_module(mod);
        /* someone could wait for the module in add_unformed_module() */
        wake_up_all(&module_wq);
        return 0;
out:
        mutex_unlock(&module_mutex);
        return ret;
}

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
        struct module_use *use;
        int printed_something = 0;

        seq_printf(m, " %i ", module_refcount(mod));

        /*
         * Always include a trailing , so userspace can differentiate
         * between this and the old multi-field proc format.
         */
        list_for_each_entry(use, &mod->source_list, source_list) {
                printed_something = 1;
                seq_printf(m, "%s,", use->source->name);
        }

        if (mod->init != NULL && mod->exit == NULL) {
                printed_something = 1;
                seq_puts(m, "[permanent],");
        }

        if (!printed_something)
                seq_puts(m, "-");
}

void __symbol_put(const char *symbol)
{
        struct module *owner;

        preempt_disable();
        if (!find_symbol(symbol, &owner, NULL, true, false))
                BUG();
        module_put(owner);
        preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
        struct module *modaddr;
        unsigned long a = (unsigned long)dereference_function_descriptor(addr);

        if (core_kernel_text(a))
                return;

        /*
         * Even though we hold a reference on the module, we still need to
         * disable preemption in order to safely traverse the data structure.
         */
        preempt_disable();
        modaddr = __module_text_address(a);
        BUG_ON(!modaddr);
        module_put(modaddr);
        preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
                           struct module_kobject *mk, char *buffer)
{
        return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
        __ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
        if (module) {
                preempt_disable();
                atomic_inc(&module->refcnt);
                trace_module_get(module, _RET_IP_);
                preempt_enable();
        }
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
        bool ret = true;

        if (module) {
                preempt_disable();
                /* Note: here, we can fail to get a reference */
                if (likely(module_is_live(module) &&
                           atomic_inc_not_zero(&module->refcnt) != 0))
                        trace_module_get(module, _RET_IP_);
                else
                        ret = false;

                preempt_enable();
        }
        return ret;
}
EXPORT_SYMBOL(try_module_get);
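
/*
 * Illustrative sketch (hypothetical caller): code that must keep another
 * module alive across a sleepable operation typically brackets it like
 * this, bailing out if the module is already on its way out:
 *
 *        if (!try_module_get(owner))
 *                return -ENODEV;
 *        ...
 *        module_put(owner);
 */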

void module_put(struct module *module)
{
        int ret;

        if (module) {
                preempt_disable();
                ret = atomic_dec_if_positive(&module->refcnt);
                WARN_ON(ret < 0);       /* Failed to put refcount */
                trace_module_put(module, _RET_IP_);
                preempt_enable();
        }
}
EXPORT_SYMBOL(module_put);

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
        /* We don't know the usage count, or which modules are using it. */
        seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
        return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
        return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static size_t module_flags_taint(struct module *mod, char *buf)
{
        size_t l = 0;
        int i;

        for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
                if (taint_flags[i].module && test_bit(i, &mod->taints))
                        buf[l++] = taint_flags[i].c_true;
        }

        return l;
}

static ssize_t show_initstate(struct module_attribute *mattr,
                              struct module_kobject *mk, char *buffer)
{
        const char *state = "unknown";

        switch (mk->mod->state) {
        case MODULE_STATE_LIVE:
                state = "live";
                break;
        case MODULE_STATE_COMING:
                state = "coming";
                break;
        case MODULE_STATE_GOING:
                state = "going";
                break;
        default:
                BUG();
        }
        return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
        __ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
                            struct module_kobject *mk,
                            const char *buffer, size_t count)
{
        int rc;

        rc = kobject_synth_uevent(&mk->kobj, buffer, count);
        return rc ? rc : count;
}

struct module_attribute module_uevent =
        __ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(struct module_attribute *mattr,
                             struct module_kobject *mk, char *buffer)
{
        return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
}

static struct module_attribute modinfo_coresize =
        __ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
                             struct module_kobject *mk, char *buffer)
{
        return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
}

static struct module_attribute modinfo_initsize =
        __ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
                          struct module_kobject *mk, char *buffer)
{
        size_t l;

        l = module_flags_taint(mk->mod, buffer);
        buffer[l++] = '\n';
        return l;
}

static struct module_attribute modinfo_taint =
        __ATTR(taint, 0444, show_taint, NULL);

static struct module_attribute *modinfo_attrs[] = {
        &module_uevent,
        &modinfo_version,
        &modinfo_srcversion,
        &modinfo_initstate,
        &modinfo_coresize,
        &modinfo_initsize,
        &modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
        &modinfo_refcnt,
#endif
        NULL,
};

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
        if (!test_taint(TAINT_FORCED_MODULE))
                pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
        add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
        return 0;
#else
        return -ENOEXEC;
#endif
}

#ifdef CONFIG_MODVERSIONS

static u32 resolve_rel_crc(const s32 *crc)
{
        return *(u32 *)((void *)crc + *crc);
}
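
/*
 * With CONFIG_MODULE_REL_CRCS the crc tables hold 32-bit offsets relative
 * to each entry rather than absolute values, so the real CRC lives at
 * (void *)crc + *crc.  check_version() below compares the exporter's CRC
 * for a symbol against the __versions table recorded in the consuming
 * module at build time.
 */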

static int check_version(const struct load_info *info,
                         const char *symname,
                         struct module *mod,
                         const s32 *crc)
{
        Elf_Shdr *sechdrs = info->sechdrs;
        unsigned int versindex = info->index.vers;
        unsigned int i, num_versions;
        struct modversion_info *versions;

        /* Exporting module didn't supply crcs?  OK, we're already tainted. */
        if (!crc)
                return 1;

        /* No versions at all?  modprobe --force does this. */
        if (versindex == 0)
                return try_to_force_load(mod, symname) == 0;

        versions = (void *) sechdrs[versindex].sh_addr;
        num_versions = sechdrs[versindex].sh_size
                / sizeof(struct modversion_info);

        for (i = 0; i < num_versions; i++) {
                u32 crcval;

                if (strcmp(versions[i].name, symname) != 0)
                        continue;

                if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
                        crcval = resolve_rel_crc(crc);
                else
                        crcval = *crc;
                if (versions[i].crc == crcval)
                        return 1;
                pr_debug("Found checksum %X vs module %lX\n",
                         crcval, versions[i].crc);
                goto bad_version;
        }

        /* Broken toolchain.  Warn once, then let it go. */
        pr_warn_once("%s: no symbol version for %s\n", info->name, symname);
        return 1;

bad_version:
        pr_warn("%s: disagrees about version of symbol %s\n",
               info->name, symname);
        return 0;
}

static inline int check_modstruct_version(const struct load_info *info,
                                          struct module *mod)
{
        const s32 *crc;

        /*
         * Since this should be found in kernel (which can't be removed), no
         * locking is necessary -- use preempt_disable() to placate lockdep.
         */
        preempt_disable();
        if (!find_symbol("module_layout", NULL, &crc, true, false)) {
                preempt_enable();
                BUG();
        }
        preempt_enable();
        return check_version(info, "module_layout", mod, crc);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
                             bool has_crcs)
{
        if (has_crcs) {
                amagic += strcspn(amagic, " ");
                bmagic += strcspn(bmagic, " ");
        }
        return strcmp(amagic, bmagic) == 0;
}
#else
static inline int check_version(const struct load_info *info,
                                const char *symname,
                                struct module *mod,
                                const s32 *crc)
{
        return 1;
}

static inline int check_modstruct_version(const struct load_info *info,
                                          struct module *mod)
{
        return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
                             bool has_crcs)
{
        return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */

static char *get_modinfo(const struct load_info *info, const char *tag);
static char *get_next_modinfo(const struct load_info *info, const char *tag,
                              char *prev);

static int verify_namespace_is_imported(const struct load_info *info,
                                        const struct kernel_symbol *sym,
                                        struct module *mod)
{
        const char *namespace;
        char *imported_namespace;

        namespace = kernel_symbol_namespace(sym);
        if (namespace) {
                imported_namespace = get_modinfo(info, "import_ns");
                while (imported_namespace) {
                        if (strcmp(namespace, imported_namespace) == 0)
                                return 0;
                        imported_namespace = get_next_modinfo(
                                info, "import_ns", imported_namespace);
                }
#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
                pr_warn(
#else
                pr_err(
#endif
                        "%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
                        mod->name, kernel_symbol_name(sym), namespace);
#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
                return -EINVAL;
#endif
        }
        return 0;
}
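
/*
 * Illustrative example: a module using a symbol exported with
 * EXPORT_SYMBOL_NS(sym, MY_NS) must carry a matching
 * MODULE_IMPORT_NS(MY_NS), which emits the "import_ns=MY_NS" modinfo
 * string this function looks for (MY_NS being a placeholder name).
 */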

/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
                                                  const struct load_info *info,
                                                  const char *name,
                                                  char ownername[])
{
        struct module *owner;
        const struct kernel_symbol *sym;
        const s32 *crc;
        int err;

        /*
         * The module_mutex should not be a heavily contended lock;
         * if we get the occasional sleep here, we'll go an extra iteration
         * in the wait_event_interruptible(), which is harmless.
         */
        sched_annotate_sleep();
        mutex_lock(&module_mutex);
        sym = find_symbol(name, &owner, &crc,
                          !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
        if (!sym)
                goto unlock;

        if (!check_version(info, name, mod, crc)) {
                sym = ERR_PTR(-EINVAL);
                goto getname;
        }

        err = verify_namespace_is_imported(info, sym, mod);
        if (err) {
                sym = ERR_PTR(err);
                goto getname;
        }

        err = ref_module(mod, owner);
        if (err) {
                sym = ERR_PTR(err);
                goto getname;
        }

getname:
1472         /* Copy the owner name under the lock: without a ref, the owner may vanish once we unlock. */
1473         strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1474 unlock:
1475         mutex_unlock(&module_mutex);
1476         return sym;
1477 }
1478 
1479 static const struct kernel_symbol *
1480 resolve_symbol_wait(struct module *mod,
1481                     const struct load_info *info,
1482                     const char *name)
1483 {
1484         const struct kernel_symbol *ksym;
1485         char owner[MODULE_NAME_LEN];
1486 
1487         if (wait_event_interruptible_timeout(module_wq,
1488                         !IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1489                         || PTR_ERR(ksym) != -EBUSY,
1490                                              30 * HZ) <= 0) {
1491                 pr_warn("%s: gave up waiting for init of module %s.\n",
1492                         mod->name, owner);
1493         }
1494         return ksym;
1495 }
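
/*
 * -EBUSY is what resolve_symbol() propagates when the owner was found but
 * is still initializing (strong_try_module_get() refuses modules in
 * MODULE_STATE_COMING), so the wait above keeps retrying on module_wq --
 * which is woken whenever some module finishes loading -- for up to 30
 * seconds before giving up on the symbol.
 */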
1496 
1497 /*
1498  * /sys/module/foo/sections stuff
1499  * J. Corbet <corbet@lwn.net>
1500  */
1501 #ifdef CONFIG_SYSFS
1502 
1503 #ifdef CONFIG_KALLSYMS
1504 static inline bool sect_empty(const Elf_Shdr *sect)
1505 {
1506         return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1507 }
1508 
1509 struct module_sect_attr {
1510         struct module_attribute mattr;
1511         char *name;
1512         unsigned long address;
1513 };
1514 
1515 struct module_sect_attrs {
1516         struct attribute_group grp;
1517         unsigned int nsections;
1518         struct module_sect_attr attrs[];
1519 };
1520 
1521 static ssize_t module_sect_show(struct module_attribute *mattr,
1522                                 struct module_kobject *mk, char *buf)
1523 {
1524         struct module_sect_attr *sattr =
1525                 container_of(mattr, struct module_sect_attr, mattr);
1526         return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
1527                        (void *)sattr->address : NULL);
1528 }
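
/*
 * Userspace consumers (debuggers, profilers) read these attributes like
 * any other sysfs file; a minimal sketch with error handling elided,
 * assuming a loaded module named "foo" and root privileges (the files
 * are mode 0400):
 *
 *	char buf[32] = "";
 *	int fd = open("/sys/module/foo/sections/.text", O_RDONLY);
 *	if (fd >= 0 && read(fd, buf, sizeof(buf) - 1) > 0)
 *		printf("%s", buf);	// "0x<address>\n", or an all-zero
 *					// value once kptr_restrict >= 2
 *	close(fd);
 */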
1529 
1530 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1531 {
1532         unsigned int section;
1533 
1534         for (section = 0; section < sect_attrs->nsections; section++)
1535                 kfree(sect_attrs->attrs[section].name);
1536         kfree(sect_attrs);
1537 }
1538 
1539 static void add_sect_attrs(struct module *mod, const struct load_info *info)
1540 {
1541         unsigned int nloaded = 0, i, size[2];
1542         struct module_sect_attrs *sect_attrs;
1543         struct module_sect_attr *sattr;
1544         struct attribute **gattr;
1545 
1546         /* Count loaded sections and allocate structures */
1547         for (i = 0; i < info->hdr->e_shnum; i++)
1548                 if (!sect_empty(&info->sechdrs[i]))
1549                         nloaded++;
1550         size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
1551                         sizeof(sect_attrs->grp.attrs[0]));
1552         size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
1553         sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1554         if (sect_attrs == NULL)
1555                 return;
1556 
1557         /* Setup section attributes. */
1558         sect_attrs->grp.name = "sections";
1559         sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
1560 
1561         sect_attrs->nsections = 0;
1562         sattr = &sect_attrs->attrs[0];
1563         gattr = &sect_attrs->grp.attrs[0];
1564         for (i = 0; i < info->hdr->e_shnum; i++) {
1565                 Elf_Shdr *sec = &info->sechdrs[i];
1566                 if (sect_empty(sec))
1567                         continue;
1568                 sattr->address = sec->sh_addr;
1569                 sattr->name = kstrdup(info->secstrings + sec->sh_name,
1570                                         GFP_KERNEL);
1571                 if (sattr->name == NULL)
1572                         goto out;
1573                 sect_attrs->nsections++;
1574                 sysfs_attr_init(&sattr->mattr.attr);
1575                 sattr->mattr.show = module_sect_show;
1576                 sattr->mattr.store = NULL;
1577                 sattr->mattr.attr.name = sattr->name;
1578                 sattr->mattr.attr.mode = S_IRUSR;
1579                 *(gattr++) = &(sattr++)->mattr.attr;
1580         }
1581         *gattr = NULL;
1582 
1583         if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1584                 goto out;
1585 
1586         mod->sect_attrs = sect_attrs;
1587         return;
1588   out:
1589         free_sect_attrs(sect_attrs);
1590 }
1591 
1592 static void remove_sect_attrs(struct module *mod)
1593 {
1594         if (mod->sect_attrs) {
1595                 sysfs_remove_group(&mod->mkobj.kobj,
1596                                    &mod->sect_attrs->grp);
1597                 /* We are positive that no one is using any sect attrs
1598                  * at this point.  Deallocate immediately. */
1599                 free_sect_attrs(mod->sect_attrs);
1600                 mod->sect_attrs = NULL;
1601         }
1602 }
1603 
1604 /*
1605  * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1606  */
1607 
1608 struct module_notes_attrs {
1609         struct kobject *dir;
1610         unsigned int notes;
1611         struct bin_attribute attrs[];
1612 };
1613 
1614 static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1615                                  struct bin_attribute *bin_attr,
1616                                  char *buf, loff_t pos, size_t count)
1617 {
1618         /*
1619          * The caller checked the pos and count against our size.
1620          */
1621         memcpy(buf, bin_attr->private + pos, count);
1622         return count;
1623 }
1624 
1625 static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1626                              unsigned int i)
1627 {
1628         if (notes_attrs->dir) {
1629                 while (i-- > 0)
1630                         sysfs_remove_bin_file(notes_attrs->dir,
1631                                               &notes_attrs->attrs[i]);
1632                 kobject_put(notes_attrs->dir);
1633         }
1634         kfree(notes_attrs);
1635 }
1636 
1637 static void add_notes_attrs(struct module *mod, const struct load_info *info)
1638 {
1639         unsigned int notes, loaded, i;
1640         struct module_notes_attrs *notes_attrs;
1641         struct bin_attribute *nattr;
1642 
1643         /* failed to create section attributes, so can't create notes */
1644         if (!mod->sect_attrs)
1645                 return;
1646 
1647         /* Count notes sections and allocate structures.  */
1648         notes = 0;
1649         for (i = 0; i < info->hdr->e_shnum; i++)
1650                 if (!sect_empty(&info->sechdrs[i]) &&
1651                     (info->sechdrs[i].sh_type == SHT_NOTE))
1652                         ++notes;
1653 
1654         if (notes == 0)
1655                 return;
1656 
1657         notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
1658                               GFP_KERNEL);
1659         if (notes_attrs == NULL)
1660                 return;
1661 
1662         notes_attrs->notes = notes;
1663         nattr = &notes_attrs->attrs[0];
1664         for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1665                 if (sect_empty(&info->sechdrs[i]))
1666                         continue;
1667                 if (info->sechdrs[i].sh_type == SHT_NOTE) {
1668                         sysfs_bin_attr_init(nattr);
1669                         nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
1670                         nattr->attr.mode = S_IRUGO;
1671                         nattr->size = info->sechdrs[i].sh_size;
1672                         nattr->private = (void *) info->sechdrs[i].sh_addr;
1673                         nattr->read = module_notes_read;
1674                         ++nattr;
1675                 }
1676                 ++loaded;
1677         }
1678 
1679         notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1680         if (!notes_attrs->dir)
1681                 goto out;
1682 
1683         for (i = 0; i < notes; ++i)
1684                 if (sysfs_create_bin_file(notes_attrs->dir,
1685                                           &notes_attrs->attrs[i]))
1686                         goto out;
1687 
1688         mod->notes_attrs = notes_attrs;
1689         return;
1690 
1691   out:
1692         free_notes_attrs(notes_attrs, i);
1693 }
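
/*
 * The files exported here are raw SHT_NOTE payloads; the best-known one
 * is .note.gnu.build-id.  A reader can walk them with the standard ELF
 * note layout -- an illustrative sketch, assuming "buf" holds one such
 * file from a 64-bit module:
 *
 *	const Elf64_Nhdr *nhdr = (const void *)buf;	// <elf.h>
 *	const char *name = (const char *)(nhdr + 1);
 *	const void *desc = name + ((nhdr->n_namesz + 3) & ~3);
 *	// nhdr->n_type says what desc holds, e.g. NT_GNU_BUILD_ID
 */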
1694 
1695 static void remove_notes_attrs(struct module *mod)
1696 {
1697         if (mod->notes_attrs)
1698                 free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1699 }
1700 
1701 #else
1702 
1703 static inline void add_sect_attrs(struct module *mod,
1704                                   const struct load_info *info)
1705 {
1706 }
1707 
1708 static inline void remove_sect_attrs(struct module *mod)
1709 {
1710 }
1711 
1712 static inline void add_notes_attrs(struct module *mod,
1713                                    const struct load_info *info)
1714 {
1715 }
1716 
1717 static inline void remove_notes_attrs(struct module *mod)
1718 {
1719 }
1720 #endif /* CONFIG_KALLSYMS */
1721 
1722 static void del_usage_links(struct module *mod)
1723 {
1724 #ifdef CONFIG_MODULE_UNLOAD
1725         struct module_use *use;
1726 
1727         mutex_lock(&module_mutex);
1728         list_for_each_entry(use, &mod->target_list, target_list)
1729                 sysfs_remove_link(use->target->holders_dir, mod->name);
1730         mutex_unlock(&module_mutex);
1731 #endif
1732 }
1733 
1734 static int add_usage_links(struct module *mod)
1735 {
1736         int ret = 0;
1737 #ifdef CONFIG_MODULE_UNLOAD
1738         struct module_use *use;
1739 
1740         mutex_lock(&module_mutex);
1741         list_for_each_entry(use, &mod->target_list, target_list) {
1742                 ret = sysfs_create_link(use->target->holders_dir,
1743                                         &mod->mkobj.kobj, mod->name);
1744                 if (ret)
1745                         break;
1746         }
1747         mutex_unlock(&module_mutex);
1748         if (ret)
1749                 del_usage_links(mod);
1750 #endif
1751         return ret;
1752 }
1753 
1754 static void module_remove_modinfo_attrs(struct module *mod, int end);
1755 
1756 static int module_add_modinfo_attrs(struct module *mod)
1757 {
1758         struct module_attribute *attr;
1759         struct module_attribute *temp_attr;
1760         int error = 0;
1761         int i;
1762 
1763         mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1764                                         (ARRAY_SIZE(modinfo_attrs) + 1)),
1765                                         GFP_KERNEL);
1766         if (!mod->modinfo_attrs)
1767                 return -ENOMEM;
1768 
1769         temp_attr = mod->modinfo_attrs;
1770         for (i = 0; (attr = modinfo_attrs[i]); i++) {
1771                 if (!attr->test || attr->test(mod)) {
1772                         memcpy(temp_attr, attr, sizeof(*temp_attr));
1773                         sysfs_attr_init(&temp_attr->attr);
1774                         error = sysfs_create_file(&mod->mkobj.kobj,
1775                                         &temp_attr->attr);
1776                         if (error)
1777                                 goto error_out;
1778                         ++temp_attr;
1779                 }
1780         }
1781 
1782         return 0;
1783 
1784 error_out:
1785         if (i > 0)
1786                 module_remove_modinfo_attrs(mod, --i);
1787         else
1788                 kfree(mod->modinfo_attrs);
1789         return error;
1790 }
1791 
1792 static void module_remove_modinfo_attrs(struct module *mod, int end)
1793 {
1794         struct module_attribute *attr;
1795         int i;
1796 
1797         for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
1798                 if (end >= 0 && i > end)
1799                         break;
1800                 /* pick a field to test for end of list */
1801                 if (!attr->attr.name)
1802                         break;
1803                 sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
1804                 if (attr->free)
1805                         attr->free(mod);
1806         }
1807         kfree(mod->modinfo_attrs);
1808 }
1809 
1810 static void mod_kobject_put(struct module *mod)
1811 {
1812         DECLARE_COMPLETION_ONSTACK(c);
1813         mod->mkobj.kobj_completion = &c;
1814         kobject_put(&mod->mkobj.kobj);
1815         wait_for_completion(&c);
1816 }
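
/*
 * The kobject_put() above may drop the last reference; module_ktype's
 * release handler is expected to complete() kobj_completion, so once
 * wait_for_completion() returns the kobject is truly gone and the
 * enclosing module structure can be freed safely.
 */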
1817 
1818 static int mod_sysfs_init(struct module *mod)
1819 {
1820         int err;
1821         struct kobject *kobj;
1822 
1823         if (!module_sysfs_initialized) {
1824                 pr_err("%s: module sysfs not initialized\n", mod->name);
1825                 err = -EINVAL;
1826                 goto out;
1827         }
1828 
1829         kobj = kset_find_obj(module_kset, mod->name);
1830         if (kobj) {
1831                 pr_err("%s: module is already loaded\n", mod->name);
1832                 kobject_put(kobj);
1833                 err = -EINVAL;
1834                 goto out;
1835         }
1836 
1837         mod->mkobj.mod = mod;
1838 
1839         memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1840         mod->mkobj.kobj.kset = module_kset;
1841         err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1842                                    "%s", mod->name);
1843         if (err)
1844                 mod_kobject_put(mod);
1845 
1846         /* delay uevent until full sysfs population */
1847 out:
1848         return err;
1849 }
1850 
1851 static int mod_sysfs_setup(struct module *mod,
1852                            const struct load_info *info,
1853                            struct kernel_param *kparam,
1854                            unsigned int num_params)
1855 {
1856         int err;
1857 
1858         err = mod_sysfs_init(mod);
1859         if (err)
1860                 goto out;
1861 
1862         mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1863         if (!mod->holders_dir) {
1864                 err = -ENOMEM;
1865                 goto out_unreg;
1866         }
1867 
1868         err = module_param_sysfs_setup(mod, kparam, num_params);
1869         if (err)
1870                 goto out_unreg_holders;
1871 
1872         err = module_add_modinfo_attrs(mod);
1873         if (err)
1874                 goto out_unreg_param;
1875 
1876         err = add_usage_links(mod);
1877         if (err)
1878                 goto out_unreg_modinfo_attrs;
1879 
1880         add_sect_attrs(mod, info);
1881         add_notes_attrs(mod, info);
1882 
1883         kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1884         return 0;
1885 
1886 out_unreg_modinfo_attrs:
1887         module_remove_modinfo_attrs(mod, -1);
1888 out_unreg_param:
1889         module_param_sysfs_remove(mod);
1890 out_unreg_holders:
1891         kobject_put(mod->holders_dir);
1892 out_unreg:
1893         mod_kobject_put(mod);
1894 out:
1895         return err;
1896 }
1897 
1898 static void mod_sysfs_fini(struct module *mod)
1899 {
1900         remove_notes_attrs(mod);
1901         remove_sect_attrs(mod);
1902         mod_kobject_put(mod);
1903 }
1904 
1905 static void init_param_lock(struct module *mod)
1906 {
1907         mutex_init(&mod->param_lock);
1908 }
1909 #else /* !CONFIG_SYSFS */
1910 
1911 static int mod_sysfs_setup(struct module *mod,
1912                            const struct load_info *info,
1913                            struct kernel_param *kparam,
1914                            unsigned int num_params)
1915 {
1916         return 0;
1917 }
1918 
1919 static void mod_sysfs_fini(struct module *mod)
1920 {
1921 }
1922 
1923 static void module_remove_modinfo_attrs(struct module *mod, int end)
1924 {
1925 }
1926 
1927 static void del_usage_links(struct module *mod)
1928 {
1929 }
1930 
1931 static void init_param_lock(struct module *mod)
1932 {
1933 }
1934 #endif /* CONFIG_SYSFS */
1935 
1936 static void mod_sysfs_teardown(struct module *mod)
1937 {
1938         del_usage_links(mod);
1939         module_remove_modinfo_attrs(mod, -1);
1940         module_param_sysfs_remove(mod);
1941         kobject_put(mod->mkobj.drivers_dir);
1942         kobject_put(mod->holders_dir);
1943         mod_sysfs_fini(mod);
1944 }
1945 
1946 #ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
1947 /*
1948  * LKM RO/NX protection: protect module's text/ro-data
1949  * from modification and any data from execution.
1950  *
1951  * General layout of module is:
1952  *          [text] [read-only-data] [ro-after-init] [writable data]
1953  * text_size -----^                ^               ^               ^
1954  * ro_size ------------------------|               |               |
1955  * ro_after_init_size -----------------------------|               |
1956  * size -----------------------------------------------------------|
1957  *
1958  * These values are always page-aligned (as is base)
1959  */
1960 static void frob_text(const struct module_layout *layout,
1961                       int (*set_memory)(unsigned long start, int num_pages))
1962 {
1963         BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1964         BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
1965         set_memory((unsigned long)layout->base,
1966                    layout->text_size >> PAGE_SHIFT);
1967 }
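
/*
 * Worked example (hypothetical 4K pages): for a core layout with
 * text_size = 0x3000, ro_size = 0x5000, ro_after_init_size = 0x6000 and
 * size = 0x8000, the frobbers carve [base, base + size) up as:
 *
 *	frob_text:          base + 0x0000 .. 0x3000  (3 pages)
 *	frob_rodata:        base + 0x3000 .. 0x5000  (2 pages)
 *	frob_ro_after_init: base + 0x5000 .. 0x6000  (1 page)
 *	frob_writable_data: base + 0x6000 .. 0x8000  (2 pages)
 */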
1968 
1969 #ifdef CONFIG_STRICT_MODULE_RWX
1970 static void frob_rodata(const struct module_layout *layout,
1971                         int (*set_memory)(unsigned long start, int num_pages))
1972 {
1973         BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1974         BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
1975         BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
1976         set_memory((unsigned long)layout->base + layout->text_size,
1977                    (layout->ro_size - layout->text_size) >> PAGE_SHIFT);
1978 }
1979 
1980 static void frob_ro_after_init(const struct module_layout *layout,
1981                                 int (*set_memory)(unsigned long start, int num_pages))
1982 {
1983         BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1984         BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
1985         BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
1986         set_memory((unsigned long)layout->base + layout->ro_size,
1987                    (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
1988 }
1989 
1990 static void frob_writable_data(const struct module_layout *layout,
1991                                int (*set_memory)(unsigned long start, int num_pages))
1992 {
1993         BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1994         BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
1995         BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1));
1996         set_memory((unsigned long)layout->base + layout->ro_after_init_size,
1997                    (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
1998 }
1999 
2000 /* livepatching wants to disable read-only so it can frob the module. */
2001 void module_disable_ro(const struct module *mod)
2002 {
2003         if (!rodata_enabled)
2004                 return;
2005 
2006         frob_text(&mod->core_layout, set_memory_rw);
2007         frob_rodata(&mod->core_layout, set_memory_rw);
2008         frob_ro_after_init(&mod->core_layout, set_memory_rw);
2009         frob_text(&mod->init_layout, set_memory_rw);
2010         frob_rodata(&mod->init_layout, set_memory_rw);
2011 }
2012 
2013 void module_enable_ro(const struct module *mod, bool after_init)
2014 {
2015         if (!rodata_enabled)
2016                 return;
2017 
2018         set_vm_flush_reset_perms(mod->core_layout.base);
2019         set_vm_flush_reset_perms(mod->init_layout.base);
2020         frob_text(&mod->core_layout, set_memory_ro);
2021 
2022         frob_rodata(&mod->core_layout, set_memory_ro);
2023         frob_text(&mod->init_layout, set_memory_ro);
2024         frob_rodata(&mod->init_layout, set_memory_ro);
2025 
2026         if (after_init)
2027                 frob_ro_after_init(&mod->core_layout, set_memory_ro);
2028 }
2029 
2030 static void module_enable_nx(const struct module *mod)
2031 {
2032         frob_rodata(&mod->core_layout, set_memory_nx);
2033         frob_ro_after_init(&mod->core_layout, set_memory_nx);
2034         frob_writable_data(&mod->core_layout, set_memory_nx);
2035         frob_rodata(&mod->init_layout, set_memory_nx);
2036         frob_writable_data(&mod->init_layout, set_memory_nx);
2037 }
2038 
2039 /* Iterate through all modules and set each module's text as RW */
2040 void set_all_modules_text_rw(void)
2041 {
2042         struct module *mod;
2043 
2044         if (!rodata_enabled)
2045                 return;
2046 
2047         mutex_lock(&module_mutex);
2048         list_for_each_entry_rcu(mod, &modules, list) {
2049                 if (mod->state == MODULE_STATE_UNFORMED)
2050                         continue;
2051 
2052                 frob_text(&mod->core_layout, set_memory_rw);
2053                 frob_text(&mod->init_layout, set_memory_rw);
2054         }
2055         mutex_unlock(&module_mutex);
2056 }
2057 
2058 /* Iterate through all modules and set each module's text as RO */
2059 void set_all_modules_text_ro(void)
2060 {
2061         struct module *mod;
2062 
2063         if (!rodata_enabled)
2064                 return;
2065 
2066         mutex_lock(&module_mutex);
2067         list_for_each_entry_rcu(mod, &modules, list) {
2068                 /*
2069                  * Ignore modules that are going away: their ro protection
2070                  * may already have been disabled, and we would otherwise
2071                  * run into protection faults at module deallocation.
2072                  */
2073                 if (mod->state == MODULE_STATE_UNFORMED ||
2074                         mod->state == MODULE_STATE_GOING)
2075                         continue;
2076 
2077                 frob_text(&mod->core_layout, set_memory_ro);
2078                 frob_text(&mod->init_layout, set_memory_ro);
2079         }
2080         mutex_unlock(&module_mutex);
2081 }
2082 #else /* !CONFIG_STRICT_MODULE_RWX */
2083 static void module_enable_nx(const struct module *mod) { }
2084 #endif /* CONFIG_STRICT_MODULE_RWX */
2085 static void module_enable_x(const struct module *mod)
2086 {
2087         frob_text(&mod->core_layout, set_memory_x);
2088         frob_text(&mod->init_layout, set_memory_x);
2089 }
2090 #else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2091 static void module_enable_nx(const struct module *mod) { }
2092 static void module_enable_x(const struct module *mod) { }
2093 #endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2094 
2095 
2096 #ifdef CONFIG_LIVEPATCH
2097 /*
2098  * Persist Elf information about a module. Copy the Elf header,
2099  * section header table, section string table, and symtab section
2100  * index from info to mod->klp_info.
2101  */
2102 static int copy_module_elf(struct module *mod, struct load_info *info)
2103 {
2104         unsigned int size, symndx;
2105         int ret;
2106 
2107         size = sizeof(*mod->klp_info);
2108         mod->klp_info = kmalloc(size, GFP_KERNEL);
2109         if (mod->klp_info == NULL)
2110                 return -ENOMEM;
2111 
2112         /* Elf header */
2113         size = sizeof(mod->klp_info->hdr);
2114         memcpy(&mod->klp_info->hdr, info->hdr, size);
2115 
2116         /* Elf section header table */
2117         size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
2118         mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL);
2119         if (mod->klp_info->sechdrs == NULL) {
2120                 ret = -ENOMEM;
2121                 goto free_info;
2122         }
2123 
2124         /* Elf section name string table */
2125         size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
2126         mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL);
2127         if (mod->klp_info->secstrings == NULL) {
2128                 ret = -ENOMEM;
2129                 goto free_sechdrs;
2130         }
2131 
2132         /* Elf symbol section index */
2133         symndx = info->index.sym;
2134         mod->klp_info->symndx = symndx;
2135 
2136         /*
2137          * For livepatch modules, core_kallsyms.symtab is a complete
2138          * copy of the original symbol table. Adjust sh_addr to point
2139          * to core_kallsyms.symtab since the copy of the symtab in module
2140          * init memory is freed at the end of do_init_module().
2141          */
2142         mod->klp_info->sechdrs[symndx].sh_addr =
2143                 (unsigned long)mod->core_kallsyms.symtab;
2144 
2145         return 0;
2146 
2147 free_sechdrs:
2148         kfree(mod->klp_info->sechdrs);
2149 free_info:
2150         kfree(mod->klp_info);
2151         return ret;
2152 }
2153 
2154 static void free_module_elf(struct module *mod)
2155 {
2156         kfree(mod->klp_info->sechdrs);
2157         kfree(mod->klp_info->secstrings);
2158         kfree(mod->klp_info);
2159 }
2160 #else /* !CONFIG_LIVEPATCH */
2161 static int copy_module_elf(struct module *mod, struct load_info *info)
2162 {
2163         return 0;
2164 }
2165 
2166 static void free_module_elf(struct module *mod)
2167 {
2168 }
2169 #endif /* CONFIG_LIVEPATCH */
2170 
2171 void __weak module_memfree(void *module_region)
2172 {
2173         /*
2174          * This memory may be RO, and freeing RO memory in an interrupt is not
2175          * supported by vmalloc.
2176          */
2177         WARN_ON(in_interrupt());
2178         vfree(module_region);
2179 }
2180 
2181 void __weak module_arch_cleanup(struct module *mod)
2182 {
2183 }
2184 
2185 void __weak module_arch_freeing_init(struct module *mod)
2186 {
2187 }
2188 
2189 /* Free a module, remove from lists, etc. */
2190 static void free_module(struct module *mod)
2191 {
2192         trace_module_free(mod);
2193 
2194         mod_sysfs_teardown(mod);
2195 
2196         /* We leave it in list to prevent duplicate loads, but make sure
2197          * that no one uses it while it's being deconstructed. */
2198         mutex_lock(&module_mutex);
2199         mod->state = MODULE_STATE_UNFORMED;
2200         mutex_unlock(&module_mutex);
2201 
2202         /* Remove dynamic debug info */
2203         ddebug_remove_module(mod->name);
2204 
2205         /* Arch-specific cleanup. */
2206         module_arch_cleanup(mod);
2207 
2208         /* Module unload stuff */
2209         module_unload_free(mod);
2210 
2211         /* Free any allocated parameters. */
2212         destroy_params(mod->kp, mod->num_kp);
2213 
2214         if (is_livepatch_module(mod))
2215                 free_module_elf(mod);
2216 
2217         /* Now we can delete it from the lists */
2218         mutex_lock(&module_mutex);
2219         /* Unlink carefully: kallsyms could be walking list. */
2220         list_del_rcu(&mod->list);
2221         mod_tree_remove(mod);
2222         /* Remove this module from bug list, this uses list_del_rcu */
2223         module_bug_cleanup(mod);
2224         /* Wait for RCU-sched synchronization before releasing mod->list and buglist. */
2225         synchronize_rcu();
2226         mutex_unlock(&module_mutex);
2227 
2228         /* This may be empty, but that's OK */
2229         module_arch_freeing_init(mod);
2230         module_memfree(mod->init_layout.base);
2231         kfree(mod->args);
2232         percpu_modfree(mod);
2233 
2234         /* Free lock-classes; relies on the preceding sync_rcu(). */
2235         lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
2236 
2237         /* Finally, free the core (containing the module structure) */
2238         module_memfree(mod->core_layout.base);
2239 }
2240 
2241 void *__symbol_get(const char *symbol)
2242 {
2243         struct module *owner;
2244         const struct kernel_symbol *sym;
2245 
2246         preempt_disable();
2247         sym = find_symbol(symbol, &owner, NULL, true, true);
2248         if (sym && strong_try_module_get(owner))
2249                 sym = NULL;     /* a non-zero return means the get failed */
2250         preempt_enable();
2251 
2252         return sym ? (void *)kernel_symbol_value(sym) : NULL;
2253 }
2254 EXPORT_SYMBOL_GPL(__symbol_get);
2255 
2256 /*
2257  * Ensure that an exported symbol [global namespace] does not already exist
2258  * in the kernel or in some other module's exported symbol table.
2259  *
2260  * You must hold the module_mutex.
2261  */
2262 static int verify_exported_symbols(struct module *mod)
2263 {
2264         unsigned int i;
2265         struct module *owner;
2266         const struct kernel_symbol *s;
2267         struct {
2268                 const struct kernel_symbol *sym;
2269                 unsigned int num;
2270         } arr[] = {
2271                 { mod->syms, mod->num_syms },
2272                 { mod->gpl_syms, mod->num_gpl_syms },
2273                 { mod->gpl_future_syms, mod->num_gpl_future_syms },
2274 #ifdef CONFIG_UNUSED_SYMBOLS
2275                 { mod->unused_syms, mod->num_unused_syms },
2276                 { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
2277 #endif
2278         };
2279 
2280         for (i = 0; i < ARRAY_SIZE(arr); i++) {
2281                 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
2282                         if (find_symbol(kernel_symbol_name(s), &owner, NULL,
2283                                         true, false)) {
2284                                 pr_err("%s: exports duplicate symbol %s"
2285                                        " (owned by %s)\n",
2286                                        mod->name, kernel_symbol_name(s),
2287                                        module_name(owner));
2288                                 return -ENOEXEC;
2289                         }
2290                 }
2291         }
2292         return 0;
2293 }
2294 
2295 /* Change all symbols so that st_value encodes the pointer directly. */
2296 static int simplify_symbols(struct module *mod, const struct load_info *info)
2297 {
2298         Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2299         Elf_Sym *sym = (void *)symsec->sh_addr;
2300         unsigned long secbase;
2301         unsigned int i;
2302         int ret = 0;
2303         const struct kernel_symbol *ksym;
2304 
2305         for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
2306                 const char *name = info->strtab + sym[i].st_name;
2307 
2308                 switch (sym[i].st_shndx) {
2309                 case SHN_COMMON:
2310                         /* Ignore common symbols */
2311                         if (!strncmp(name, "__gnu_lto", 9))
2312                                 break;
2313 
2314                         /* We compiled with -fno-common.  These are not
2315                          * supposed to happen. */
2316                         pr_debug("Common symbol: %s\n", name);
2317                         pr_warn("%s: please compile with -fno-common\n",
2318                                mod->name);
2319                         ret = -ENOEXEC;
2320                         break;
2321 
2322                 case SHN_ABS:
2323                         /* Don't need to do anything */
2324                         pr_debug("Absolute symbol: 0x%08lx\n",
2325                                (long)sym[i].st_value);
2326                         break;
2327 
2328                 case SHN_LIVEPATCH:
2329                         /* Livepatch symbols are resolved by livepatch */
2330                         break;
2331 
2332                 case SHN_UNDEF:
2333                         ksym = resolve_symbol_wait(mod, info, name);
2334                         /* Ok if resolved.  */
2335                         if (ksym && !IS_ERR(ksym)) {
2336                                 sym[i].st_value = kernel_symbol_value(ksym);
2337                                 break;
2338                         }
2339 
2340                         /* Ok if weak.  */
2341                         if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
2342                                 break;
2343 
2344                         ret = PTR_ERR(ksym) ?: -ENOENT;
2345                         pr_warn("%s: Unknown symbol %s (err %d)\n",
2346                                 mod->name, name, ret);
2347                         break;
2348 
2349                 default:
2350                         /* Divert to percpu allocation if a percpu var. */
2351                         if (sym[i].st_shndx == info->index.pcpu)
2352                                 secbase = (unsigned long)mod_percpu(mod);
2353                         else
2354                                 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
2355                         sym[i].st_value += secbase;
2356                         break;
2357                 }
2358         }
2359 
2360         return ret;
2361 }
2362 
2363 static int apply_relocations(struct module *mod, const struct load_info *info)
2364 {
2365         unsigned int i;
2366         int err = 0;
2367 
2368         /* Now do relocations. */
2369         for (i = 1; i < info->hdr->e_shnum; i++) {
2370                 unsigned int infosec = info->sechdrs[i].sh_info;
2371 
2372                 /* Not a valid relocation section? */
2373                 if (infosec >= info->hdr->e_shnum)
2374                         continue;
2375 
2376                 /* Don't bother with non-allocated sections */
2377                 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2378                         continue;
2379 
2380                 /* Livepatch relocation sections are applied by livepatch */
2381                 if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
2382                         continue;
2383 
2384                 if (info->sechdrs[i].sh_type == SHT_REL)
2385                         err = apply_relocate(info->sechdrs, info->strtab,
2386                                              info->index.sym, i, mod);
2387                 else if (info->sechdrs[i].sh_type == SHT_RELA)
2388                         err = apply_relocate_add(info->sechdrs, info->strtab,
2389                                                  info->index.sym, i, mod);
2390                 if (err < 0)
2391                         break;
2392         }
2393         return err;
2394 }
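
/*
 * The arch hooks do the actual patching.  As an illustration (not the
 * code of any particular arch), a plain 64-bit absolute RELA entry is
 * typically applied as:
 *
 *	void *loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
 *		    + rel->r_offset;
 *	Elf64_Sym *sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
 *			 + ELF64_R_SYM(rel->r_info);
 *	*(u64 *)loc = sym->st_value + rel->r_addend;
 *
 * where st_value is already an absolute address, courtesy of
 * simplify_symbols() above.
 */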
2395 
2396 /* Additional bytes needed by arch in front of individual sections */
2397 unsigned int __weak arch_mod_section_prepend(struct module *mod,
2398                                              unsigned int section)
2399 {
2400         /* default implementation just returns zero */
2401         return 0;
2402 }
2403 
2404 /* Update size with this section: return offset. */
2405 static long get_offset(struct module *mod, unsigned int *size,
2406                        Elf_Shdr *sechdr, unsigned int section)
2407 {
2408         long ret;
2409 
2410         *size += arch_mod_section_prepend(mod, section);
2411         ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2412         *size = ret + sechdr->sh_size;
2413         return ret;
2414 }
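
/*
 * Example: with *size == 0x34c, no arch prepend, sh_addralign == 8 and
 * sh_size == 0x20, get_offset() returns ALIGN(0x34c, 8) == 0x350 as the
 * section's offset and advances *size to 0x370.
 */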
2415 
2416 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2417  * might -- code, read-only data, read-write data, small data.  Tally
2418  * sizes, and place the offsets into sh_entsize fields: high bit means it
2419  * belongs in init. */
2420 static void layout_sections(struct module *mod, struct load_info *info)
2421 {
2422         static unsigned long const masks[][2] = {
2423                 /* NOTE: all executable code must be the first section
2424                  * in this array; otherwise modify the text_size
2425                  * finder in the two loops below */
2426                 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2427                 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2428                 { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
2429                 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2430                 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2431         };
2432         unsigned int m, i;
2433 
2434         for (i = 0; i < info->hdr->e_shnum; i++)
2435                 info->sechdrs[i].sh_entsize = ~0UL;
2436 
2437         pr_debug("Core section allocation order:\n");
2438         for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2439                 for (i = 0; i < info->hdr->e_shnum; ++i) {
2440                         Elf_Shdr *s = &info->sechdrs[i];
2441                         const char *sname = info->secstrings + s->sh_name;
2442 
2443                         if ((s->sh_flags & masks[m][0]) != masks[m][0]
2444                             || (s->sh_flags & masks[m][1])
2445                             || s->sh_entsize != ~0UL
2446                             || strstarts(sname, ".init"))
2447                                 continue;
2448                         s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
2449                         pr_debug("\t%s\n", sname);
2450                 }
2451                 switch (m) {
2452                 case 0: /* executable */
2453                         mod->core_layout.size = debug_align(mod->core_layout.size);
2454                         mod->core_layout.text_size = mod->core_layout.size;
2455                         break;
2456                 case 1: /* RO: text and ro-data */
2457                         mod->core_layout.size = debug_align(mod->core_layout.size);
2458                         mod->core_layout.ro_size = mod->core_layout.size;
2459                         break;
2460                 case 2: /* RO after init */
2461                         mod->core_layout.size = debug_align(mod->core_layout.size);
2462                         mod->core_layout.ro_after_init_size = mod->core_layout.size;
2463                         break;
2464                 case 4: /* whole core */
2465                         mod->core_layout.size = debug_align(mod->core_layout.size);
2466                         break;
2467                 }
2468         }
2469 
2470         pr_debug("Init section allocation order:\n");
2471         for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2472                 for (i = 0; i < info->hdr->e_shnum; ++i) {
2473                         Elf_Shdr *s = &info->sechdrs[i];
2474                         const char *sname = info->secstrings + s->sh_name;
2475 
2476                         if ((s->sh_flags & masks[m][0]) != masks[m][0]
2477                             || (s->sh_flags & masks[m][1])
2478                             || s->sh_entsize != ~0UL
2479                             || !strstarts(sname, ".init"))
2480                                 continue;
2481                         s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
2482                                          | INIT_OFFSET_MASK);
2483                         pr_debug("\t%s\n", sname);
2484                 }
2485                 switch (m) {
2486                 case 0: /* executable */
2487                         mod->init_layout.size = debug_align(mod->init_layout.size);
2488                         mod->init_layout.text_size = mod->init_layout.size;
2489                         break;
2490                 case 1: /* RO: text and ro-data */
2491                         mod->init_layout.size = debug_align(mod->init_layout.size);
2492                         mod->init_layout.ro_size = mod->init_layout.size;
2493                         break;
2494                 case 2:
2495                         /*
2496                          * RO after init doesn't apply to init_layout (only
2497                          * core_layout), so it just takes the value of ro_size.
2498                          */
2499                         mod->init_layout.ro_after_init_size = mod->init_layout.ro_size;
2500                         break;
2501                 case 4: /* whole init */
2502                         mod->init_layout.size = debug_align(mod->init_layout.size);
2503                         break;
2504                 }
2505         }
2506 }
2507 
2508 static void set_license(struct module *mod, const char *license)
2509 {
2510         if (!license)
2511                 license = "unspecified";
2512 
2513         if (!license_is_gpl_compatible(license)) {
2514                 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2515                         pr_warn("%s: module license '%s' taints kernel.\n",
2516                                 mod->name, license);
2517                 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2518                                  LOCKDEP_NOW_UNRELIABLE);
2519         }
2520 }
2521 
2522 /* Parse tag=value strings from .modinfo section */
2523 static char *next_string(char *string, unsigned long *secsize)
2524 {
2525         /* Skip non-zero chars */
2526         while (string[0]) {
2527                 string++;
2528                 if ((*secsize)-- <= 1)
2529                         return NULL;
2530         }
2531 
2532         /* Skip any zero padding. */
2533         while (!string[0]) {
2534                 string++;
2535                 if ((*secsize)-- <= 1)
2536                         return NULL;
2537         }
2538         return string;
2539 }
2540 
2541 static char *get_next_modinfo(const struct load_info *info, const char *tag,
2542                               char *prev)
2543 {
2544         char *p;
2545         unsigned int taglen = strlen(tag);
2546         Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2547         unsigned long size = infosec->sh_size;
2548 
2549         /*
2550          * get_modinfo() calls made before rewrite_section_headers()
2551          * must use sh_offset, as sh_addr isn't set!
2552          */
2553         char *modinfo = (char *)info->hdr + infosec->sh_offset;
2554 
2555         if (prev) {
2556                 size -= prev - modinfo;
2557                 modinfo = next_string(prev, &size);
2558         }
2559 
2560         for (p = modinfo; p; p = next_string(p, &size)) {
2561                 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2562                         return p + taglen + 1;
2563         }
2564         return NULL;
2565 }
2566 
2567 static char *get_modinfo(const struct load_info *info, const char *tag)
2568 {
2569         return get_next_modinfo(info, tag, NULL);
2570 }
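
/*
 * .modinfo is a packed sequence of NUL-terminated "tag=value" strings.
 * For a hypothetical module it might contain:
 *
 *	"license=GPL\0author=Jane Doe\0import_ns=MY_NS\0import_ns=OTHER\0"
 *
 * get_modinfo(info, "license") then returns a pointer to "GPL", and a
 * repeated tag such as "import_ns" is iterated with get_next_modinfo().
 */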
2571 
2572 static void setup_modinfo(struct module *mod, struct load_info *info)
2573 {
2574         struct module_attribute *attr;
2575         int i;
2576 
2577         for (i = 0; (attr = modinfo_attrs[i]); i++) {
2578                 if (attr->setup)
2579                         attr->setup(mod, get_modinfo(info, attr->attr.name));
2580         }
2581 }
2582 
2583 static void free_modinfo(struct module *mod)
2584 {
2585         struct module_attribute *attr;
2586         int i;
2587 
2588         for (i = 0; (attr = modinfo_attrs[i]); i++) {
2589                 if (attr->free)
2590                         attr->free(mod);
2591         }
2592 }
2593 
2594 #ifdef CONFIG_KALLSYMS
2595 
2596 /* Lookup exported symbol in given range of kernel_symbols */
2597 static const struct kernel_symbol *lookup_exported_symbol(const char *name,
2598                                                           const struct kernel_symbol *start,
2599                                                           const struct kernel_symbol *stop)
2600 {
2601         return bsearch(name, start, stop - start,
2602                         sizeof(struct kernel_symbol), cmp_name);
2603 }
2604 
2605 static int is_exported(const char *name, unsigned long value,
2606                        const struct module *mod)
2607 {
2608         const struct kernel_symbol *ks;
2609         if (!mod)
2610                 ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab);
2611         else
2612                 ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms);
2613 
2614         return ks != NULL && kernel_symbol_value(ks) == value;
2615 }
2616 
2617 /* As per nm */
2618 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2619 {
2620         const Elf_Shdr *sechdrs = info->sechdrs;
2621 
2622         if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2623                 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2624                         return 'v';
2625                 else
2626                         return 'w';
2627         }
2628         if (sym->st_shndx == SHN_UNDEF)
2629                 return 'U';
2630         if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)
2631                 return 'a';
2632         if (sym->st_shndx >= SHN_LORESERVE)
2633                 return '?';
2634         if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2635                 return 't';
2636         if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2637             && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2638                 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2639                         return 'r';
2640                 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2641                         return 'g';
2642                 else
2643                         return 'd';
2644         }
2645         if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2646                 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2647                         return 's';
2648                 else
2649                         return 'b';
2650         }
2651         if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2652                       ".debug")) {
2653                 return 'n';
2654         }
2655         return '?';
2656 }
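
/*
 * For instance, a function in .text comes back as 't', a const table in
 * .rodata as 'r', and zero-initialized data in .bss (SHT_NOBITS) as 'b'
 * -- the lowercase type letters nm uses.
 */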
2657 
2658 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2659                         unsigned int shnum, unsigned int pcpundx)
2660 {
2661         const Elf_Shdr *sec;
2662 
2663         if (src->st_shndx == SHN_UNDEF
2664             || src->st_shndx >= shnum
2665             || !src->st_name)
2666                 return false;
2667 
2668 #ifdef CONFIG_KALLSYMS_ALL
2669         if (src->st_shndx == pcpundx)
2670                 return true;
2671 #endif
2672 
2673         sec = sechdrs + src->st_shndx;
2674         if (!(sec->sh_flags & SHF_ALLOC)
2675 #ifndef CONFIG_KALLSYMS_ALL
2676             || !(sec->sh_flags & SHF_EXECINSTR)
2677 #endif
2678             || (sec->sh_entsize & INIT_OFFSET_MASK))
2679                 return false;
2680 
2681         return true;
2682 }
2683 
2684 /*
2685  * We only allocate and copy the strings needed by the parts of symtab
2686  * we keep.  This is simple, but has the effect of making multiple
2687  * copies of duplicates.  We could be more sophisticated, see
2688  * linux-kernel thread starting with
2689  * <73defb5e4bca04a6431392cc341112b1@localhost>.
2690  */
2691 static void layout_symtab(struct module *mod, struct load_info *info)
2692 {
2693         Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2694         Elf_Shdr *strsect = info->sechdrs + info->index.str;
2695         const Elf_Sym *src;
2696         unsigned int i, nsrc, ndst, strtab_size = 0;
2697 
2698         /* Put symbol section at end of init part of module. */
2699         symsect->sh_flags |= SHF_ALLOC;
2700         symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect,
2701                                          info->index.sym) | INIT_OFFSET_MASK;
2702         pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2703 
2704         src = (void *)info->hdr + symsect->sh_offset;
2705         nsrc = symsect->sh_size / sizeof(*src);
2706 
2707         /* Compute total space required for the core symbols' strtab. */
2708         for (ndst = i = 0; i < nsrc; i++) {
2709                 if (i == 0 || is_livepatch_module(mod) ||
2710                     is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2711                                    info->index.pcpu)) {
2712                         strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2713                         ndst++;
2714                 }
2715         }
2716 
2717         /* Append room for core symbols at end of core part. */
2718         info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
2719         info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
2720         mod->core_layout.size += strtab_size;
2721         info->core_typeoffs = mod->core_layout.size;
2722         mod->core_layout.size += ndst * sizeof(char);
2723         mod->core_layout.size = debug_align(mod->core_layout.size);
2724 
2725         /* Put string table section at end of init part of module. */
2726         strsect->sh_flags |= SHF_ALLOC;
2727         strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
2728                                          info->index.str) | INIT_OFFSET_MASK;
2729         pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2730 
2731         /* We'll tack temporary mod_kallsyms on the end. */
2732         mod->init_layout.size = ALIGN(mod->init_layout.size,
2733                                       __alignof__(struct mod_kallsyms));
2734         info->mod_kallsyms_init_off = mod->init_layout.size;
2735         mod->init_layout.size += sizeof(struct mod_kallsyms);
2736         info->init_typeoffs = mod->init_layout.size;
2737         mod->init_layout.size += nsrc * sizeof(char);
2738         mod->init_layout.size = debug_align(mod->init_layout.size);
2739 }
2740 
2741 /*
2742  * We use the full symtab and strtab which layout_symtab arranged to
2743  * be appended to the init section.  Later we switch to the cut-down
2744  * core-only ones.
2745  */
2746 static void add_kallsyms(struct module *mod, const struct load_info *info)
2747 {
2748         unsigned int i, ndst;
2749         const Elf_Sym *src;
2750         Elf_Sym *dst;
2751         char *s;
2752         Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2753 
2754         /* Set up to point into init section. */
2755         mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
2756 
2757         mod->kallsyms->symtab = (void *)symsec->sh_addr;
2758         mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2759         /* Make sure we get permanent strtab: don't use info->strtab. */
2760         mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2761         mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs;
2762 
2763         /*
2764          * Now populate the cut down core kallsyms for after init
2765          * and set types up while we still have access to sections.
2766          */
2767         mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
2768         mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
2769         mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs;
2770         src = mod->kallsyms->symtab;
2771         for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2772                 mod->kallsyms->typetab[i] = elf_type(src + i, info);
2773                 if (i == 0 || is_livepatch_module(mod) ||
2774                     is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2775                                    info->index.pcpu)) {
2776                         mod->core_kallsyms.typetab[ndst] =
2777                             mod->kallsyms->typetab[i];
2778                         dst[ndst] = src[i];
2779                         dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
2780                         s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
2781                                      KSYM_NAME_LEN) + 1;
2782                 }
2783         }
2784         mod->core_kallsyms.num_symtab = ndst;
2785 }
2786 #else
2787 static inline void layout_symtab(struct module *mod, struct load_info *info)
2788 {
2789 }
2790 
2791 static void add_kallsyms(struct module *mod, const struct load_info *info)
2792 {
2793 }
2794 #endif /* CONFIG_KALLSYMS */
2795 
2796 static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num)
2797 {
2798         if (!debug)
2799                 return;
2800         ddebug_add_module(debug, num, mod->name);
2801 }
2802 
2803 static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
2804 {
2805         if (debug)
2806                 ddebug_remove_module(mod->name);
2807 }
2808 
2809 void * __weak module_alloc(unsigned long size)
2810 {
2811         return vmalloc_exec(size);
2812 }
2813 
2814 bool __weak module_exit_section(const char *name)
2815 {
2816         return strstarts(name, ".exit");
2817 }
2818 
2819 #ifdef CONFIG_DEBUG_KMEMLEAK
2820 static void kmemleak_load_module(const struct module *mod,
2821                                  const struct load_info *info)
2822 {
2823         unsigned int i;
2824 
2825         /* only scan the sections containing data */
2826         kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2827 
2828         for (i = 1; i < info->hdr->e_shnum; i++) {
2829                 /* Scan all writable sections that are not executable */
2830                 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2831                     !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2832                     (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2833                         continue;
2834 
2835                 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2836                                    info->sechdrs[i].sh_size, GFP_KERNEL);
2837         }
2838 }
2839 #else
2840 static inline void kmemleak_load_module(const struct module *mod,
2841                                         const struct load_info *info)
2842 {
2843 }
2844 #endif
2845 
2846 #ifdef CONFIG_MODULE_SIG
2847 static int module_sig_check(struct load_info *info, int flags)
2848 {
2849         int err = -ENODATA;
2850         const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2851         const char *reason;
2852         const void *mod = info->hdr;
2853 
2854         /*
2855          * Require flags == 0, as a module with version information
2856          * removed is no longer the module that was signed
2857          */
2858         if (flags == 0 &&
2859             info->len > markerlen &&
2860             memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2861                 /* We truncate the module to discard the signature */
2862                 info->len -= markerlen;
2863                 err = mod_verify_sig(mod, info);
2864         }
2865 
2866         switch (err) {
2867         case 0:
2868                 info->sig_ok = true;
2869                 return 0;
2870 
2871                 /* We don't permit modules to be loaded into trusted kernels
2872                  * without a valid signature on them, but if we're not
2873                  * enforcing, certain errors are non-fatal.
2874                  */
2875         case -ENODATA:
2876                 reason = "Loading of unsigned module";
2877                 goto decide;
2878         case -ENOPKG:
2879                 reason = "Loading of module with unsupported crypto";
2880                 goto decide;
2881         case -ENOKEY:
2882                 reason = "Loading of module with unavailable key";
2883         decide:
2884                 if (is_module_sig_enforced()) {
2885                         pr_notice("%s is rejected\n", reason);
2886                         return -EKEYREJECTED;
2887                 }
2888 
2889                 return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
2890 
2891                 /* All other errors are fatal, including nomem, unparseable
2892                  * signatures and signature check failures - even if signatures
2893                  * aren't required.
2894                  */
2895         default:
2896                 return err;
2897         }
2898 }
2899 #else /* !CONFIG_MODULE_SIG */
2900 static int module_sig_check(struct load_info *info, int flags)
2901 {
2902         return 0;
2903 }
2904 #endif /* !CONFIG_MODULE_SIG */
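
/*
 * Illustrative sketch (not part of this file): a signed module is the raw
 * ELF image with the signature blob, a struct module_signature and the
 * MODULE_SIG_STRING marker ("~Module signature appended~\n") appended; the
 * memcmp() against the file tail above matches that marker.  A userspace
 * check for it could look like this (error handling elided):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	static const char marker[] = "~Module signature appended~\n";
 *
 *	static int is_signed(FILE *f)
 *	{
 *		char tail[sizeof(marker) - 1];
 *
 *		fseek(f, -(long)sizeof(tail), SEEK_END);
 *		if (fread(tail, 1, sizeof(tail), f) != sizeof(tail))
 *			return 0;
 *		return memcmp(tail, marker, sizeof(tail)) == 0;
 *	}
 */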
2905 
2906 /* Sanity checks against invalid binaries, wrong arch, weird ELF version. */
2907 static int elf_header_check(struct load_info *info)
2908 {
2909         if (info->len < sizeof(*(info->hdr)))
2910                 return -ENOEXEC;
2911 
2912         if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2913             || info->hdr->e_type != ET_REL
2914             || !elf_check_arch(info->hdr)
2915             || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2916                 return -ENOEXEC;
2917 
2918         if (info->hdr->e_shoff >= info->len
2919             || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2920                 info->len - info->hdr->e_shoff))
2921                 return -ENOEXEC;
2922 
2923         return 0;
2924 }
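
/*
 * Worked example for the bounds checks above (assuming a 64-bit kernel, so
 * sizeof(Elf_Shdr) == 64): in a 10000-byte image with e_shoff == 9000, at
 * most (10000 - 9000) / 64 == 15 section headers fit, so e_shnum == 16
 * fails the second check and the load is rejected with -ENOEXEC.
 */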
2925 
2926 #define COPY_CHUNK_SIZE (16*PAGE_SIZE)
2927 
2928 static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
2929 {
2930         do {
2931                 unsigned long n = min(len, COPY_CHUNK_SIZE);
2932 
2933                 if (copy_from_user(dst, usrc, n) != 0)
2934                         return -EFAULT;
2935                 cond_resched();
2936                 dst += n;
2937                 usrc += n;
2938                 len -= n;
2939         } while (len);
2940         return 0;
2941 }
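
/*
 * With 4 KiB pages COPY_CHUNK_SIZE is 64 KiB, so e.g. a 3 MiB module image
 * is copied in 48 chunks with a cond_resched() between each, bounding the
 * time spent in copy_from_user() without a reschedule point.
 */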
2942 
2943 #ifdef CONFIG_LIVEPATCH
2944 static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2945 {
2946         if (get_modinfo(info, "livepatch")) {
2947                 mod->klp = true;
2948                 add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
2949                 pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
2950                                mod->name);
2951         }
2952 
2953         return 0;
2954 }
2955 #else /* !CONFIG_LIVEPATCH */
2956 static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2957 {
2958         if (get_modinfo(info, "livepatch")) {
2959                 pr_err("%s: module is marked as livepatch module, but livepatch support is disabled\n",
2960                        mod->name);
2961                 return -ENOEXEC;
2962         }
2963 
2964         return 0;
2965 }
2966 #endif /* CONFIG_LIVEPATCH */
2967 
2968 static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
2969 {
2970         if (retpoline_module_ok(get_modinfo(info, "retpoline")))
2971                 return;
2972 
2973         pr_warn("%s: loading module not compiled with retpoline compiler.\n",
2974                 mod->name);
2975 }
2976 
2977 /* Sets info->hdr and info->len. */
2978 static int copy_module_from_user(const void __user *umod, unsigned long len,
2979                                   struct load_info *info)
2980 {
2981         int err;
2982 
2983         info->len = len;
2984         if (info->len < sizeof(*(info->hdr)))
2985                 return -ENOEXEC;
2986 
2987         err = security_kernel_load_data(LOADING_MODULE);
2988         if (err)
2989                 return err;
2990 
2991         /* Suck in entire file: we'll want most of it. */
2992         info->hdr = __vmalloc(info->len,
2993                         GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
2994         if (!info->hdr)
2995                 return -ENOMEM;
2996 
2997         if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
2998                 vfree(info->hdr);
2999                 return -EFAULT;
3000         }
3001 
3002         return 0;
3003 }
3004 
3005 static void free_copy(struct load_info *info)
3006 {
3007         vfree(info->hdr);
3008 }
3009 
3010 static int rewrite_section_headers(struct load_info *info, int flags)
3011 {
3012         unsigned int i;
3013 
3014         /* This should always be true, but let's be sure. */
3015         info->sechdrs[0].sh_addr = 0;
3016 
3017         for (i = 1; i < info->hdr->e_shnum; i++) {
3018                 Elf_Shdr *shdr = &info->sechdrs[i];
3019                 if (shdr->sh_type != SHT_NOBITS
3020                     && info->len < shdr->sh_offset + shdr->sh_size) {
3021                         pr_err("Module len %lu truncated\n", info->len);
3022                         return -ENOEXEC;
3023                 }
3024 
3025                 /* Update each section's sh_addr to its address in the
3026                    temporary image. */
3027                 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
3028 
3029 #ifndef CONFIG_MODULE_UNLOAD
3030                 /* Don't load .exit sections */
3031                 if (module_exit_section(info->secstrings+shdr->sh_name))
3032                         shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
3033 #endif
3034         }
3035 
3036         /* Track but don't keep modinfo and version sections. */
3037         info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
3038         info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
3039 
3040         return 0;
3041 }
3042 
3043 /*
3044  * Set up our basic convenience variables (pointers to section headers,
3045  * search for module section index etc), and do some basic section
3046  * verification.
3047  *
3048  * Set info->mod to the temporary copy of the module in info->hdr. The final one
3049  * will be allocated in move_module().
3050  */
3051 static int setup_load_info(struct load_info *info, int flags)
3052 {
3053         unsigned int i;
3054 
3055         /* Set up the convenience variables */
3056         info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
3057         info->secstrings = (void *)info->hdr
3058                 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
3059 
3060         /* Try to find a name early so we can log errors with a module name */
3061         info->index.info = find_sec(info, ".modinfo");
3062         if (info->index.info)
3063                 info->name = get_modinfo(info, "name");
3064 
3065         /* Find internal symbols and strings. */
3066         for (i = 1; i < info->hdr->e_shnum; i++) {
3067                 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
3068                         info->index.sym = i;
3069                         info->index.str = info->sechdrs[i].sh_link;
3070                         info->strtab = (char *)info->hdr
3071                                 + info->sechdrs[info->index.str].sh_offset;
3072                         break;
3073                 }
3074         }
3075 
3076         if (info->index.sym == 0) {
3077                 pr_warn("%s: module has no symbols (stripped?)\n",
3078                         info->name ?: "(missing .modinfo section or name field)");
3079                 return -ENOEXEC;
3080         }
3081 
3082         info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
3083         if (!info->index.mod) {
3084                 pr_warn("%s: No module found in object\n",
3085                         info->name ?: "(missing .modinfo section or name field)");
3086                 return -ENOEXEC;
3087         }
3088         /* This is temporary: point mod into copy of data. */
3089         info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;
3090 
3091         /*
3092          * If we didn't load the .modinfo 'name' field earlier, fall back to
3093          * on-disk struct mod 'name' field.
3094          */
3095         if (!info->name)
3096                 info->name = info->mod->name;
3097 
3098         if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
3099                 info->index.vers = 0; /* Pretend no __versions section! */
3100         else
3101                 info->index.vers = find_sec(info, "__versions");
3102 
3103         info->index.pcpu = find_pcpusec(info);
3104 
3105         return 0;
3106 }
3107 
3108 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
3109 {
3110         const char *modmagic = get_modinfo(info, "vermagic");
3111         int err;
3112 
3113         if (flags & MODULE_INIT_IGNORE_VERMAGIC)
3114                 modmagic = NULL;
3115 
3116         /* This is allowed: modprobe --force will invalidate it. */
3117         if (!modmagic) {
3118                 err = try_to_force_load(mod, "bad vermagic");
3119                 if (err)
3120                         return err;
3121         } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
3122                 pr_err("%s: version magic '%s' should be '%s'\n",
3123                        info->name, modmagic, vermagic);
3124                 return -ENOEXEC;
3125         }
3126 
3127         if (!get_modinfo(info, "intree")) {
3128                 if (!test_taint(TAINT_OOT_MODULE))
3129                         pr_warn("%s: loading out-of-tree module taints kernel.\n",
3130                                 mod->name);
3131                 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
3132         }
3133 
3134         check_modinfo_retpoline(mod, info);
3135 
3136         if (get_modinfo(info, "staging")) {
3137                 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
3138                 pr_warn("%s: module is from the staging directory, the quality "
3139                         "is unknown, you have been warned.\n", mod->name);
3140         }
3141 
3142         err = check_modinfo_livepatch(mod, info);
3143         if (err)
3144                 return err;
3145 
3146         /* Set up license info based on the info section */
3147         set_license(mod, get_modinfo(info, "license"));
3148 
3149         return 0;
3150 }
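
/*
 * Example (illustrative): a vermagic string encodes the kernel release plus
 * ABI-affecting build options, e.g.
 *
 *	"5.4.0 SMP mod_unload modversions"
 *
 * When the module carries __versions CRCs, same_magic() skips the leading
 * release token (the CRCs cover compatibility instead), but the remaining
 * flags must still match exactly; without CRCs the whole string must match.
 */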
3151 
3152 static int find_module_sections(struct module *mod, struct load_info *info)
3153 {
3154         mod->kp = section_objs(info, "__param",
3155                                sizeof(*mod->kp), &mod->num_kp);
3156         mod->syms = section_objs(info, "__ksymtab",
3157                                  sizeof(*mod->syms), &mod->num_syms);
3158         mod->crcs = section_addr(info, "__kcrctab");
3159         mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
3160                                      sizeof(*mod->gpl_syms),
3161                                      &mod->num_gpl_syms);
3162         mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
3163         mod->gpl_future_syms = section_objs(info,
3164                                             "__ksymtab_gpl_future",
3165                                             sizeof(*mod->gpl_future_syms),
3166                                             &mod->num_gpl_future_syms);
3167         mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
3168 
3169 #ifdef CONFIG_UNUSED_SYMBOLS
3170         mod->unused_syms = section_objs(info, "__ksymtab_unused",
3171                                         sizeof(*mod->unused_syms),
3172                                         &mod->num_unused_syms);
3173         mod->unused_crcs = section_addr(info, "__kcrctab_unused");
3174         mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
3175                                             sizeof(*mod->unused_gpl_syms),
3176                                             &mod->num_unused_gpl_syms);
3177         mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
3178 #endif
3179 #ifdef CONFIG_CONSTRUCTORS
3180         mod->ctors = section_objs(info, ".ctors",
3181                                   sizeof(*mod->ctors), &mod->num_ctors);
3182         if (!mod->ctors)
3183                 mod->ctors = section_objs(info, ".init_array",
3184                                 sizeof(*mod->ctors), &mod->num_ctors);
3185         else if (find_sec(info, ".init_array")) {
3186                 /*
3187                  * This shouldn't happen with the same compiler and binutils
3188                  * building all parts of the module.
3189                  */
3190                 pr_warn("%s: has both .ctors and .init_array.\n",
3191                        mod->name);
3192                 return -EINVAL;
3193         }
3194 #endif
3195 
3196 #ifdef CONFIG_TRACEPOINTS
3197         mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
3198                                              sizeof(*mod->tracepoints_ptrs),
3199                                              &mod->num_tracepoints);
3200 #endif
3201 #ifdef CONFIG_TREE_SRCU
3202         mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs",
3203                                              sizeof(*mod->srcu_struct_ptrs),
3204                                              &mod->num_srcu_structs);
3205 #endif
3206 #ifdef CONFIG_BPF_EVENTS
3207         mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
3208                                            sizeof(*mod->bpf_raw_events),
3209                                            &mod->num_bpf_raw_events);
3210 #endif
3211 #ifdef CONFIG_JUMP_LABEL
3212         mod->jump_entries = section_objs(info, "__jump_table",
3213                                         sizeof(*mod->jump_entries),
3214                                         &mod->num_jump_entries);
3215 #endif
3216 #ifdef CONFIG_EVENT_TRACING
3217         mod->trace_events = section_objs(info, "_ftrace_events",
3218                                          sizeof(*mod->trace_events),
3219                                          &mod->num_trace_events);
3220         mod->trace_evals = section_objs(info, "_ftrace_eval_map",
3221                                         sizeof(*mod->trace_evals),
3222                                         &mod->num_trace_evals);
3223 #endif
3224 #ifdef CONFIG_TRACING
3225         mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
3226                                          sizeof(*mod->trace_bprintk_fmt_start),
3227                                          &mod->num_trace_bprintk_fmt);
3228 #endif
3229 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
3230         /* sechdrs[0].sh_size is always zero */
3231         mod->ftrace_callsites = section_objs(info, "__mcount_loc",
3232                                              sizeof(*mod->ftrace_callsites),
3233                                              &mod->num_ftrace_callsites);
3234 #endif
3235 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
3236         mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
3237                                             sizeof(*mod->ei_funcs),
3238                                             &mod->num_ei_funcs);
3239 #endif
3240         mod->extable = section_objs(info, "__ex_table",
3241                                     sizeof(*mod->extable), &mod->num_exentries);
3242 
3243         if (section_addr(info, "__obsparm"))
3244                 pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
3245 
3246         info->debug = section_objs(info, "__verbose",
3247                                    sizeof(*info->debug), &info->num_debug);
3248 
3249         return 0;
3250 }
3251 
3252 static int move_module(struct module *mod, struct load_info *info)
3253 {
3254         int i;
3255         void *ptr;
3256 
3257         /* Do the allocs. */
3258         ptr = module_alloc(mod->core_layout.size);
3259         /*
3260          * The pointer to this block is stored in the module structure
3261          * which is inside the block. Just mark it as not being a
3262          * leak.
3263          */
3264         kmemleak_not_leak(ptr);
3265         if (!ptr)
3266                 return -ENOMEM;
3267 
3268         memset(ptr, 0, mod->core_layout.size);
3269         mod->core_layout.base = ptr;
3270 
3271         if (mod->init_layout.size) {
3272                 ptr = module_alloc(mod->init_layout.size);
3273                 /*
3274                  * The pointer to this block is stored in the module structure
3275                  * which is inside the block. This block doesn't need to be
3276                  * scanned as it contains data and code that will be freed
3277                  * after the module is initialized.
3278                  */
3279                 kmemleak_ignore(ptr);
3280                 if (!ptr) {
3281                         module_memfree(mod->core_layout.base);
3282                         return -ENOMEM;
3283                 }
3284                 memset(ptr, 0, mod->init_layout.size);
3285                 mod->init_layout.base = ptr;
3286         } else
3287                 mod->init_layout.base = NULL;
3288 
3289         /* Transfer each section which specifies SHF_ALLOC */
3290         pr_debug("final section addresses:\n");
3291         for (i = 0; i < info->hdr->e_shnum; i++) {
3292                 void *dest;
3293                 Elf_Shdr *shdr = &info->sechdrs[i];
3294 
3295                 if (!(shdr->sh_flags & SHF_ALLOC))
3296                         continue;
3297 
3298                 if (shdr->sh_entsize & INIT_OFFSET_MASK)
3299                         dest = mod->init_layout.base
3300                                 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
3301                 else
3302                         dest = mod->core_layout.base + shdr->sh_entsize;
3303 
3304                 if (shdr->sh_type != SHT_NOBITS)
3305                         memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
3306                 /* Update sh_addr to point to copy in image. */
3307                 shdr->sh_addr = (unsigned long)dest;
3308                 pr_debug("\t0x%lx %s\n",
3309                          (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
3310         }
3311 
3312         return 0;
3313 }
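
/*
 * Note on the sh_entsize encoding consumed above: layout_sections() stores
 * each section's destination offset in sh_entsize and sets the top bit
 * (INIT_OFFSET_MASK, i.e. 1UL << (BITS_PER_LONG - 1)) for sections placed
 * in the init region, e.g.:
 *
 *	shdr->sh_entsize = 0x1000;			// core, offset 0x1000
 *	shdr->sh_entsize = INIT_OFFSET_MASK | 0x200;	// init, offset 0x200
 */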
3314 
3315 static int check_module_license_and_versions(struct module *mod)
3316 {
3317         int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
3318 
3319         /*
3320          * ndiswrapper is under GPL by itself, but loads proprietary modules.
3321          * Don't use add_taint_module(), as it would prevent ndiswrapper from
3322          * using GPL-only symbols it needs.
3323          */
3324         if (strcmp(mod->name, "ndiswrapper") == 0)
3325                 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
3326 
3327         /* driverloader was caught wrongly pretending to be under GPL */
3328         if (strcmp(mod->name, "driverloader") == 0)
3329                 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3330                                  LOCKDEP_NOW_UNRELIABLE);
3331 
3332         /* lve claims to be GPL but upstream won't provide source */
3333         if (strcmp(mod->name, "lve") == 0)
3334                 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3335                                  LOCKDEP_NOW_UNRELIABLE);
3336 
3337         if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
3338                 pr_warn("%s: module license taints kernel.\n", mod->name);
3339 
3340 #ifdef CONFIG_MODVERSIONS
3341         if ((mod->num_syms && !mod->crcs)
3342             || (mod->num_gpl_syms && !mod->gpl_crcs)
3343             || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
3344 #ifdef CONFIG_UNUSED_SYMBOLS
3345             || (mod->num_unused_syms && !mod->unused_crcs)
3346             || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
3347 #endif
3348                 ) {
3349                 return try_to_force_load(mod,
3350                                          "no versions for exported symbols");
3351         }
3352 #endif
3353         return 0;
3354 }
3355 
3356 static void flush_module_icache(const struct module *mod)
3357 {
3358         mm_segment_t old_fs;
3359 
3360         /* flush the icache in the correct context */
3361         old_fs = get_fs();
3362         set_fs(KERNEL_DS);
3363 
3364         /*
3365          * Flush the instruction cache, since we've played with text.
3366          * Do it before processing of module parameters, so the module
3367          * can provide parameter accessor functions of its own.
3368          */
3369         if (mod->init_layout.base)
3370                 flush_icache_range((unsigned long)mod->init_layout.base,
3371                                    (unsigned long)mod->init_layout.base
3372                                    + mod->init_layout.size);
3373         flush_icache_range((unsigned long)mod->core_layout.base,
3374                            (unsigned long)mod->core_layout.base + mod->core_layout.size);
3375 
3376         set_fs(old_fs);
3377 }
3378 
3379 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
3380                                      Elf_Shdr *sechdrs,
3381                                      char *secstrings,
3382                                      struct module *mod)
3383 {
3384         return 0;
3385 }
3386 
3387 /* module_blacklist is a comma-separated list of module names */
3388 static char *module_blacklist;
3389 static bool blacklisted(const char *module_name)
3390 {
3391         const char *p;
3392         size_t len;
3393 
3394         if (!module_blacklist)
3395                 return false;
3396 
3397         for (p = module_blacklist; *p; p += len) {
3398                 len = strcspn(p, ",");
3399                 if (strlen(module_name) == len && !memcmp(module_name, p, len))
3400                         return true;
3401                 if (p[len] == ',')
3402                         len++;
3403         }
3404         return false;
3405 }
3406 core_param(module_blacklist, module_blacklist, charp, 0400);
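
/*
 * Example (illustrative): module_blacklist is taken from the kernel command
 * line, so booting with
 *
 *	module_blacklist=nouveau,floppy
 *
 * makes load_module() reject both names with -EPERM via blacklisted().
 */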
3407 
3408 static struct module *layout_and_allocate(struct load_info *info, int flags)
3409 {
3410         struct module *mod;
3411         unsigned int ndx;
3412         int err;
3413 
3414         err = check_modinfo(info->mod, info, flags);
3415         if (err)
3416                 return ERR_PTR(err);
3417 
3418         /* Allow arches to frob section contents and sizes.  */
3419         err = module_frob_arch_sections(info->hdr, info->sechdrs,
3420                                         info->secstrings, info->mod);
3421         if (err < 0)
3422                 return ERR_PTR(err);
3423 
3424         /* We will do a special allocation for per-cpu sections later. */
3425         info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
3426 
3427         /*
3428          * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
3429          * layout_sections() can put it in the right place.
3430          * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
3431          */
3432         ndx = find_sec(info, ".data..ro_after_init");
3433         if (ndx)
3434                 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
3435         /*
3436          * Mark the __jump_table section as ro_after_init as well: these data
3437          * structures are never modified, with the exception of entries that
3438          * refer to code in the __init section, which are annotated as such
3439          * at module load time.
3440          */
3441         ndx = find_sec(info, "__jump_table");
3442         if (ndx)
3443                 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
3444 
3445         /* Determine total sizes, and put offsets in sh_entsize.  For now
3446            this is done generically; there don't appear to be any
3447            special cases for the architectures. */
3448         layout_sections(info->mod, info);
3449         layout_symtab(info->mod, info);
3450 
3451         /* Allocate and move to the final place */
3452         err = move_module(info->mod, info);
3453         if (err)
3454                 return ERR_PTR(err);
3455 
3456         /* Module has been copied to its final place now: return it. */
3457         mod = (void *)info->sechdrs[info->index.mod].sh_addr;
3458         kmemleak_load_module(mod, info);
3459         return mod;
3460 }
3461 
3462 /* mod is no longer valid after this! */
3463 static void module_deallocate(struct module *mod, struct load_info *info)
3464 {
3465         percpu_modfree(mod);
3466         module_arch_freeing_init(mod);
3467         module_memfree(mod->init_layout.base);
3468         module_memfree(mod->core_layout.base);
3469 }
3470 
3471 int __weak module_finalize(const Elf_Ehdr *hdr,
3472                            const Elf_Shdr *sechdrs,
3473                            struct module *me)
3474 {
3475         return 0;
3476 }
3477 
3478 static int post_relocation(struct module *mod, const struct load_info *info)
3479 {
3480         /* Sort exception table now relocations are done. */
3481         sort_extable(mod->extable, mod->extable + mod->num_exentries);
3482 
3483         /* Copy relocated percpu area over. */
3484         percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
3485                        info->sechdrs[info->index.pcpu].sh_size);
3486 
3487         /* Setup kallsyms-specific fields. */
3488         add_kallsyms(mod, info);
3489 
3490         /* Arch-specific module finalizing. */
3491         return module_finalize(info->hdr, info->sechdrs, mod);
3492 }
3493 
3494 /* Is this module of this name done loading?  No locks held. */
3495 static bool finished_loading(const char *name)
3496 {
3497         struct module *mod;
3498         bool ret;
3499 
3500         /*
3501          * The module_mutex should not be a heavily contended lock;
3502          * if we get the occasional sleep here, we'll go an extra iteration
3503          * in the wait_event_interruptible(), which is harmless.
3504          */
3505         sched_annotate_sleep();
3506         mutex_lock(&module_mutex);
3507         mod = find_module_all(name, strlen(name), true);
3508         ret = !mod || mod->state == MODULE_STATE_LIVE;
3509         mutex_unlock(&module_mutex);
3510 
3511         return ret;
3512 }
3513 
3514 /* Call module constructors. */
3515 static void do_mod_ctors(struct module *mod)
3516 {
3517 #ifdef CONFIG_CONSTRUCTORS
3518         unsigned long i;
3519 
3520         for (i = 0; i < mod->num_ctors; i++)
3521                 mod->ctors[i]();
3522 #endif
3523 }
3524 
3525 /* For freeing module_init on success, in case kallsyms is still traversing it */
3526 struct mod_initfree {
3527         struct llist_node node;
3528         void *module_init;
3529 };
3530 
3531 static void do_free_init(struct work_struct *w)
3532 {
3533         struct llist_node *pos, *n, *list;
3534         struct mod_initfree *initfree;
3535 
3536         list = llist_del_all(&init_free_list);
3537 
3538         synchronize_rcu();
3539 
3540         llist_for_each_safe(pos, n, list) {
3541                 initfree = container_of(pos, struct mod_initfree, node);
3542                 module_memfree(initfree->module_init);
3543                 kfree(initfree);
3544         }
3545 }
3546 
3547 static int __init modules_wq_init(void)
3548 {
3549         INIT_WORK(&init_free_wq, do_free_init);
3550         init_llist_head(&init_free_list);
3551         return 0;
3552 }
3553 module_init(modules_wq_init);
3554 
3555 /*
3556  * This is where the real work happens.
3557  *
3558  * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
3559  * helper command 'lx-symbols'.
3560  */
3561 static noinline int do_init_module(struct module *mod)
3562 {
3563         int ret = 0;
3564         struct mod_initfree *freeinit;
3565 
3566         freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
3567         if (!freeinit) {
3568                 ret = -ENOMEM;
3569                 goto fail;
3570         }
3571         freeinit->module_init = mod->init_layout.base;
3572 
3573         /*
3574          * We want to find out whether @mod uses async during init.  Clear
3575          * PF_USED_ASYNC.  async_schedule*() will set it.
3576          */
3577         current->flags &= ~PF_USED_ASYNC;
3578 
3579         do_mod_ctors(mod);
3580         /* Start the module */
3581         if (mod->init != NULL)
3582                 ret = do_one_initcall(mod->init);
3583         if (ret < 0) {
3584                 goto fail_free_freeinit;
3585         }
3586         if (ret > 0) {
3587                 pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3588                         "follow 0/-E convention\n"
3589                         "%s: loading module anyway...\n",
3590                         __func__, mod->name, ret, __func__);
3591                 dump_stack();
3592         }
3593 
3594         /* Now it's a first class citizen! */
3595         mod->state = MODULE_STATE_LIVE;
3596         blocking_notifier_call_chain(&module_notify_list,
3597                                      MODULE_STATE_LIVE, mod);
3598 
3599         /*
3600          * We need to finish all async code before the module init sequence
3601          * is done.  This has potential to deadlock.  For example, a newly
3602          * detected block device can trigger request_module() of the
3603          * default iosched from async probing task.  Once userland helper
3604          * reaches here, async_synchronize_full() will wait on the async
3605          * task waiting on request_module() and deadlock.
3606          *
3607          * This deadlock is avoided by performing async_synchronize_full()
3608          * iff module init queued any async jobs.  This isn't a full
3609          * solution as it will still deadlock if module loading from
3610          * async jobs nests more than once; however, due to the various
3611          * constraints, this hack seems to be the best option for now.
3612          * Please refer to the following thread for details.
3613          *
3614          * http://thread.gmane.org/gmane.linux.kernel/1420814
3615          */
3616         if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
3617                 async_synchronize_full();
3618 
3619         ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
3620                         mod->init_layout.size);
3621         mutex_lock(&module_mutex);
3622         /* Drop initial reference. */
3623         module_put(mod);
3624         trim_init_extable(mod);
3625 #ifdef CONFIG_KALLSYMS
3626         /* Switch to core kallsyms now init is done: kallsyms may be walking! */
3627         rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
3628 #endif
3629         module_enable_ro(mod, true);
3630         mod_tree_remove_init(mod);
3631         module_arch_freeing_init(mod);
3632         mod->init_layout.base = NULL;
3633         mod->init_layout.size = 0;
3634         mod->init_layout.ro_size = 0;
3635         mod->init_layout.ro_after_init_size = 0;
3636         mod->init_layout.text_size = 0;
3637         /*
3638          * We want to free module_init, but be aware that kallsyms may be
3639          * walking this with preempt disabled.  In all the failure paths, we
3640          * call synchronize_rcu(), but we don't want to slow down the success
3641          * path. module_memfree() cannot be called in an interrupt, so do the
3642          * work and call synchronize_rcu() in a work queue.
3643          *
3644          * Note that module_alloc() on most architectures creates W+X page
3645          * mappings which won't be cleaned up until do_free_init() runs.  Any
3646          * code such as mark_rodata_ro() which depends on those mappings to
3647          * be cleaned up needs to sync with the queued work - i.e.
3648          * rcu_barrier().
3649          */
3650         if (llist_add(&freeinit->node, &init_free_list))
3651                 schedule_work(&init_free_wq);
3652 
3653         mutex_unlock(&module_mutex);
3654         wake_up_all(&module_wq);
3655 
3656         return 0;
3657 
3658 fail_free_freeinit:
3659         kfree(freeinit);
3660 fail:
3661         /* Try to protect us from buggy refcounters. */
3662         mod->state = MODULE_STATE_GOING;
3663         synchronize_rcu();
3664         module_put(mod);
3665         blocking_notifier_call_chain(&module_notify_list,
3666                                      MODULE_STATE_GOING, mod);
3667         klp_module_going(mod);
3668         ftrace_release_mod(mod);
3669         free_module(mod);
3670         wake_up_all(&module_wq);
3671         return ret;
3672 }
3673 
3674 static int may_init_module(void)
3675 {
3676         if (!capable(CAP_SYS_MODULE) || modules_disabled)
3677                 return -EPERM;
3678 
3679         return 0;
3680 }
3681 
3682 /*
3683  * We try to place it in the list now to make sure it's unique before
3684  * we dedicate too many resources; in particular, this guards against
3685  * temporary percpu memory exhaustion.
3686  */
3687 static int add_unformed_module(struct module *mod)
3688 {
3689         int err;
3690         struct module *old;
3691 
3692         mod->state = MODULE_STATE_UNFORMED;
3693 
3694 again:
3695         mutex_lock(&module_mutex);
3696         old = find_module_all(mod->name, strlen(mod->name), true);
3697         if (old != NULL) {
3698                 if (old->state != MODULE_STATE_LIVE) {
3699                         /* Wait in case it fails to load. */
3700                         mutex_unlock(&module_mutex);
3701                         err = wait_event_interruptible(module_wq,
3702                                                finished_loading(mod->name));
3703                         if (err)
3704                                 goto out_unlocked;
3705                         goto again;
3706                 }
3707                 err = -EEXIST;
3708                 goto out;
3709         }
3710         mod_update_bounds(mod);
3711         list_add_rcu(&mod->list, &modules);
3712         mod_tree_insert(mod);
3713         err = 0;
3714 
3715 out:
3716         mutex_unlock(&module_mutex);
3717 out_unlocked:
3718         return err;
3719 }
3720 
3721 static int complete_formation(struct module *mod, struct load_info *info)
3722 {
3723         int err;
3724 
3725         mutex_lock(&module_mutex);
3726 
3727         /* Find duplicate symbols (must be called under lock). */
3728         err = verify_exported_symbols(mod);
3729         if (err < 0)
3730                 goto out;
3731 
3732         /* This relies on module_mutex for list integrity. */
3733         module_bug_finalize(info->hdr, info->sechdrs, mod);
3734 
3735         module_enable_ro(mod, false);
3736         module_enable_nx(mod);
3737         module_enable_x(mod);
3738 
3739         /* Mark state as coming so strong_try_module_get() ignores us,
3740          * but kallsyms etc. can see us. */
3741         mod->state = MODULE_STATE_COMING;
3742         mutex_unlock(&module_mutex);
3743 
3744         return 0;
3745 
3746 out:
3747         mutex_unlock(&module_mutex);
3748         return err;
3749 }
3750 
3751 static int prepare_coming_module(struct module *mod)
3752 {
3753         int err;
3754 
3755         ftrace_module_enable(mod);
3756         err = klp_module_coming(mod);
3757         if (err)
3758                 return err;
3759 
3760         blocking_notifier_call_chain(&module_notify_list,
3761                                      MODULE_STATE_COMING, mod);
3762         return 0;
3763 }
3764 
3765 static int unknown_module_param_cb(char *param, char *val, const char *modname,
3766                                    void *arg)
3767 {
3768         struct module *mod = arg;
3769         int ret;
3770 
3771         if (strcmp(param, "async_probe") == 0) {
3772                 mod->async_probe_requested = true;
3773                 return 0;
3774         }
3775 
3776         /* Check for magic 'dyndbg' arg */
3777         ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3778         if (ret != 0)
3779                 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3780         return 0;
3781 }
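
/*
 * Example (illustrative, "mymod" is a placeholder): two parameters that a
 * module need not declare itself are still accepted here:
 *
 *	insmod mymod.ko async_probe
 *	insmod mymod.ko dyndbg=+pmf
 *
 * "async_probe" sets mod->async_probe_requested; "dyndbg" is claimed by
 * ddebug_dyndbg_module_param_cb().  Anything else is logged and ignored.
 */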
3782 
3783 /* Allocate and load the module: note that the size of section 0 is always
3784    zero, and we rely on this for optional sections. */
3785 static int load_module(struct load_info *info, const char __user *uargs,
3786                        int flags)
3787 {
3788         struct module *mod;
3789         long err = 0;
3790         char *after_dashes;
3791 
3792         err = elf_header_check(info);
3793         if (err)
3794                 goto free_copy;
3795 
3796         err = setup_load_info(info, flags);
3797         if (err)
3798                 goto free_copy;
3799 
3800         if (blacklisted(info->name)) {
3801                 err = -EPERM;
3802                 goto free_copy;
3803         }
3804 
3805         err = module_sig_check(info, flags);
3806         if (err)
3807                 goto free_copy;
3808 
3809         err = rewrite_section_headers(info, flags);
3810         if (err)
3811                 goto free_copy;
3812 
3813         /* Check module struct version now, before we try to use module. */
3814         if (!check_modstruct_version(info, info->mod)) {
3815                 err = -ENOEXEC;
3816                 goto free_copy;
3817         }
3818 
3819         /* Figure out module layout, and allocate all the memory. */
3820         mod = layout_and_allocate(info, flags);
3821         if (IS_ERR(mod)) {
3822                 err = PTR_ERR(mod);
3823                 goto free_copy;
3824         }
3825 
3826         audit_log_kern_module(mod->name);
3827 
3828         /* Reserve our place in the list. */
3829         err = add_unformed_module(mod);
3830         if (err)
3831                 goto free_module;
3832 
3833 #ifdef CONFIG_MODULE_SIG
3834         mod->sig_ok = info->sig_ok;
3835         if (!mod->sig_ok) {
3836                 pr_notice_once("%s: module verification failed: signature "
3837                                "and/or required key missing - tainting "
3838                                "kernel\n", mod->name);
3839                 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
3840         }
3841 #endif
3842 
3843         /* To avoid stressing percpu allocator, do this once we're unique. */
3844         err = percpu_modalloc(mod, info);
3845         if (err)
3846                 goto unlink_mod;
3847 
3848         /* Now module is in final location, initialize linked lists, etc. */
3849         err = module_unload_init(mod);
3850         if (err)
3851                 goto unlink_mod;
3852 
3853         init_param_lock(mod);
3854 
3855         /* Now we've got everything in the final locations, we can
3856          * find optional sections. */
3857         err = find_module_sections(mod, info);
3858         if (err)
3859                 goto free_unload;
3860 
3861         err = check_module_license_and_versions(mod);
3862         if (err)
3863                 goto free_unload;
3864 
3865         /* Set up MODINFO_ATTR fields */
3866         setup_modinfo(mod, info);
3867 
3868         /* Fix up syms, so that st_value is a pointer to location. */
3869         err = simplify_symbols(mod, info);
3870         if (err < 0)
3871                 goto free_modinfo;
3872 
3873         err = apply_relocations(mod, info);
3874         if (err < 0)
3875                 goto free_modinfo;
3876 
3877         err = post_relocation(mod, info);
3878         if (err < 0)
3879                 goto free_modinfo;
3880 
3881         flush_module_icache(mod);
3882 
3883         /* Now copy in args */
3884         mod->args = strndup_user(uargs, ~0UL >> 1);
3885         if (IS_ERR(mod->args)) {
3886                 err = PTR_ERR(mod->args);
3887                 goto free_arch_cleanup;
3888         }
3889 
3890         dynamic_debug_setup(mod, info->debug, info->num_debug);
3891 
3892         /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3893         ftrace_module_init(mod);
3894 
3895         /* Finally it's fully formed, ready to start executing. */
3896         err = complete_formation(mod, info);
3897         if (err)
3898                 goto ddebug_cleanup;
3899 
3900         err = prepare_coming_module(mod);
3901         if (err)
3902                 goto bug_cleanup;
3903 
3904         /* Module is ready to execute: parsing args may do that. */
3905         after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3906                                   -32768, 32767, mod,
3907                                   unknown_module_param_cb);
3908         if (IS_ERR(after_dashes)) {
3909                 err = PTR_ERR(after_dashes);
3910                 goto coming_cleanup;
3911         } else if (after_dashes) {
3912                 pr_warn("%s: parameters '%s' after `--' ignored\n",
3913                        mod->name, after_dashes);
3914         }
3915 
3916         /* Link in to sysfs. */
3917         err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3918         if (err < 0)
3919                 goto coming_cleanup;
3920 
3921         if (is_livepatch_module(mod)) {
3922                 err = copy_module_elf(mod, info);
3923                 if (err < 0)
3924                         goto sysfs_cleanup;
3925         }
3926 
3927         /* Get rid of temporary copy. */
3928         free_copy(info);
3929 
3930         /* Done! */
3931         trace_module_load(mod);
3932 
3933         return do_init_module(mod);
3934 
3935  sysfs_cleanup:
3936         mod_sysfs_teardown(mod);
3937  coming_cleanup:
3938         mod->state = MODULE_STATE_GOING;
3939         destroy_params(mod->kp, mod->num_kp);
3940         blocking_notifier_call_chain(&module_notify_list,
3941                                      MODULE_STATE_GOING, mod);
3942         klp_module_going(mod);
3943  bug_cleanup:
3944         /* module_bug_cleanup needs module_mutex protection */
3945         mutex_lock(&module_mutex);
3946         module_bug_cleanup(mod);
3947         mutex_unlock(&module_mutex);
3948 
3949  ddebug_cleanup:
3950         ftrace_release_mod(mod);
3951         dynamic_debug_remove(mod, info->debug);
3952         synchronize_rcu();
3953         kfree(mod->args);
3954  free_arch_cleanup:
3955         module_arch_cleanup(mod);
3956  free_modinfo:
3957         free_modinfo(mod);
3958  free_unload:
3959         module_unload_free(mod);
3960  unlink_mod:
3961         mutex_lock(&module_mutex);
3962         /* Unlink carefully: kallsyms could be walking list. */
3963         list_del_rcu(&mod->list);
3964         mod_tree_remove(mod);
3965         wake_up_all(&module_wq);
3966         /* Wait for RCU-sched synchronization before releasing mod->list. */
3967         synchronize_rcu();
3968         mutex_unlock(&module_mutex);
3969  free_module:
3970         /* Free lock-classes; relies on the preceding sync_rcu() */
3971         lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
3972 
3973         module_deallocate(mod, info);
3974  free_copy:
3975         free_copy(info);
3976         return err;
3977 }
3978 
3979 SYSCALL_DEFINE3(init_module, void __user *, umod,
3980                 unsigned long, len, const char __user *, uargs)
3981 {
3982         int err;
3983         struct load_info info = { };
3984 
3985         err = may_init_module();
3986         if (err)
3987                 return err;
3988 
3989         pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3990                umod, len, uargs);
3991 
3992         err = copy_module_from_user(umod, len, &info);
3993         if (err)
3994                 return err;
3995 
3996         return load_module(&info, uargs, 0);
3997 }
3998 
3999 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
4000 {
4001         struct load_info info = { };
4002         loff_t size;
4003         void *hdr;
4004         int err;
4005 
4006         err = may_init_module();
4007         if (err)
4008                 return err;
4009 
4010         pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
4011 
4012         if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
4013                       |MODULE_INIT_IGNORE_VERMAGIC))
4014                 return -EINVAL;
4015 
4016         err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
4017                                        READING_MODULE);
4018         if (err)
4019                 return err;
4020         info.hdr = hdr;
4021         info.len = size;
4022 
4023         return load_module(&info, uargs, flags);
4024 }
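
/*
 * Illustrative userspace sketch (not part of this file; the .ko path is a
 * placeholder): both syscalls are normally reached via libc's syscall()
 * wrapper, finit_module() being preferred since the kernel can read the
 * module from an opened file:
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/lib/modules/example.ko", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		// flags == 0: keep modversions and vermagic checks enabled
 *		return syscall(SYS_finit_module, fd, "", 0) ? 1 : 0;
 *	}
 */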
4025 
4026 static inline int within(unsigned long addr, void *start, unsigned long size)
4027 {
4028         return ((void *)addr >= start && (void *)addr < start + size);
4029 }
4030 
4031 #ifdef CONFIG_KALLSYMS
4032 /*
4033  * This ignores the intensely annoying "mapping symbols" found
4034  * in ARM ELF files: $a, $t and $d.
4035  */
4036 static inline int is_arm_mapping_symbol(const char *str)
4037 {
4038         if (str[0] == '.' && str[1] == 'L')
4039                 return true;
4040         return str[0] == '$' && strchr("axtd", str[1])
4041                && (str[2] == '\0' || str[2] == '.');
4042 }
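
/*
 * Example (illustrative): "$a" marks the start of ARM code, "$t" Thumb code,
 * "$x" A64 code and "$d" data, optionally suffixed as in "$d.3"; assembler
 * local labels starting with ".L" are skipped for the same reason.
 */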
4043 
4044 static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum)
4045 {
4046         return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
4047 }
4048 
4049 /*
4050  * Given a module and address, find the corresponding symbol and return its name
4051  * while providing its size and offset if needed.
4052  */
4053 static const char *find_kallsyms_symbol(struct module *mod,
4054                                         unsigned long addr,
4055                                         unsigned long *size,
4056                                         unsigned long *offset)
4057 {
4058         unsigned int i, best = 0;
4059         unsigned long nextval, bestval;
4060         struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
4061 
4062         /* At worst, the next value is at the end of the module */
4063         if (within_module_init(addr, mod))
4064                 nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size;
4065         else
4066                 nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;
4067 
4068         bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);
4069 
4070         /* Scan for closest preceding symbol, and next symbol. (ELF
4071            starts real symbols at 1). */
4072         for (i = 1; i < kallsyms->num_symtab; i++) {
4073                 const Elf_Sym *sym = &kallsyms->symtab[i];
4074                 unsigned long thisval = kallsyms_symbol_value(sym);
4075 
4076                 if (sym->st_shndx == SHN_UNDEF)
4077                         continue;
4078 
4079                 /* We ignore unnamed symbols: they're uninformative
4080                  * and inserted at a whim. */
4081                 if (*kallsyms_symbol_name(kallsyms, i) == '\0'
4082                     || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i)))
4083                         continue;
4084 
4085                 if (thisval <= addr && thisval > bestval) {
4086                         best = i;
4087                         bestval = thisval;
4088                 }
4089                 if (thisval > addr && thisval < nextval)
4090                         nextval = thisval;
4091         }
4092 
4093         if (!best)
4094                 return NULL;
4095 
4096         if (size)
4097                 *size = nextval - bestval;
4098         if (offset)
4099                 *offset = addr - bestval;
4100 
4101         return kallsyms_symbol_name(kallsyms, best);
4102 }
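
/*
 * Worked example for the scan above: with symbols foo at 0x1000 and bar at
 * 0x1080, looking up addr == 0x1050 picks foo as the best (closest
 * preceding) symbol and bar as nextval, returning "foo" with
 * *offset == 0x50 and *size == 0x80 (nextval - bestval).
 */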
4103 
4104 void * __weak dereference_module_function_descriptor(struct module *mod,
4105                                                      void *ptr)
4106 {
4107         return ptr;
4108 }
4109 
4110 /* For kallsyms to ask for address resolution.  NULL means not found.  Be
4111  * careful not to lock: to avoid deadlock on oopses, simply disable preemption. */
4112 const char *module_address_lookup(unsigned long addr,
4113                             unsigned long *size,
4114                             unsigned long *offset,
4115                             char **modname,
4116                             char *namebuf)
4117 {
4118         const char *ret = NULL;
4119         struct module *mod;
4120 
4121         preempt_disable();
4122         mod = __module_address(addr);
4123         if (mod) {
4124                 if (modname)
4125                         *modname = mod->name;
4126 
4127                 ret = find_kallsyms_symbol(mod, addr, size, offset);
4128         }
4129         /* Make a copy in here where it's safe */
4130         if (ret) {
4131                 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
4132                 ret = namebuf;
4133         }
4134         preempt_enable();
4135 
4136         return ret;
4137 }
4138 
4139 int lookup_module_symbol_name(unsigned long addr, char *symname)
4140 {
4141         struct module *mod;
4142 
4143         preempt_disable();
4144         list_for_each_entry_rcu(mod, &modules, list) {
4145                 if (mod->state == MODULE_STATE_UNFORMED)
4146                         continue;
4147                 if (within_module(addr, mod)) {
4148                         const char *sym;
4149 
4150                         sym = find_kallsyms_symbol(mod, addr, NULL, NULL);
4151                         if (!sym)
4152                                 goto out;
4153 
4154                         strlcpy(symname, sym, KSYM_NAME_LEN);
4155                         preempt_enable();
4156                         return 0;
4157                 }
4158         }
4159 out:
4160         preempt_enable();
4161         return -ERANGE;
4162 }
4163 
4164 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
4165                         unsigned long *offset, char *modname, char *name)
4166 {
4167         struct module *mod;
4168 
4169         preempt_disable();
4170         list_for_each_entry_rcu(mod, &modules, list) {
4171                 if (mod->state == MODULE_STATE_UNFORMED)
4172                         continue;
4173                 if (within_module(addr, mod)) {
4174                         const char *sym;
4175 
4176                         sym = find_kallsyms_symbol(mod, addr, size, offset);
4177                         if (!sym)
4178                                 goto out;
4179                         if (modname)
4180                                 strlcpy(modname, mod->name, MODULE_NAME_LEN);
4181                         if (name)
4182                                 strlcpy(name, sym, KSYM_NAME_LEN);
4183                         preempt_enable();
4184                         return 0;
4185                 }
4186         }
4187 out:
4188         preempt_enable();
4189         return -ERANGE;
4190 }
4191 
4192 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
4193                         char *name, char *module_name, int *exported)
4194 {
4195         struct module *mod;
4196 
4197         preempt_disable();
4198         list_for_each_entry_rcu(mod, &modules, list) {
4199                 struct mod_kallsyms *kallsyms;
4200 
4201                 if (mod->state == MODULE_STATE_UNFORMED)
4202                         continue;
4203                 kallsyms = rcu_dereference_sched(mod->kallsyms);
4204                 if (symnum < kallsyms->num_symtab) {
4205                         const Elf_Sym *sym = &kallsyms->symtab[symnum];
4206 
4207                         *value = kallsyms_symbol_value(sym);
4208                         *type = kallsyms->typetab[symnum];
4209                         strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN);
4210                         strlcpy(module_name, mod->name, MODULE_NAME_LEN);
4211                         *exported = is_exported(name, *value, mod);
4212                         preempt_enable();
4213                         return 0;
4214                 }
4215                 symnum -= kallsyms->num_symtab;
4216         }
4217         preempt_enable();
4218         return -ERANGE;
4219 }
4220 
4221 /* Given a module and name of symbol, find and return the symbol's value */
4222 static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
4223 {
4224         unsigned int i;
4225         struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
4226 
4227         for (i = 0; i < kallsyms->num_symtab; i++) {
4228                 const Elf_Sym *sym = &kallsyms->symtab[i];
4229 
4230                 if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 &&
4231                     sym->st_shndx != SHN_UNDEF)
4232                         return kallsyms_symbol_value(sym);
4233         }
4234         return 0;
4235 }
4236 
4237 /* Look for this name: can be of form module:name. */
4238 unsigned long module_kallsyms_lookup_name(const char *name)
4239 {
4240         struct module *mod;
4241         char *colon;
4242         unsigned long ret = 0;
4243 
4244         /* Don't lock: we're in enough trouble already. */
4245         preempt_disable();
4246         if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
4247                 if ((mod = find_module_all(name, colon - name, false)) != NULL)
4248                         ret = find_kallsyms_symbol_value(mod, colon+1);
4249         } else {
4250                 list_for_each_entry_rcu(mod, &modules, list) {
4251                         if (mod->state == MODULE_STATE_UNFORMED)
4252                                 continue;
4253                         if ((ret = find_kallsyms_symbol_value(mod, name)) != 0)
4254                                 break;
4255                 }
4256         }
4257         preempt_enable();
4258         return ret;
4259 }
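
/*
 * Example (illustrative): both lookup forms are accepted:
 *
 *	module_kallsyms_lookup_name("usbcore:usb_register");	// one module
 *	module_kallsyms_lookup_name("usb_register");		// scan all
 *
 * The unqualified form walks every live module and returns the first match,
 * so the qualified form is preferable when the owning module is known.
 */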
4260 
4261 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
4262                                              struct module *, unsigned long),
4263                                    void *data)
4264 {
4265         struct module *mod;
4266         unsigned int i;
4267         int ret;
4268 
4269         module_assert_mutex();
4270 
4271         list_for_each_entry(mod, &modules, list) {
4272                 /* We hold module_mutex: no need for rcu_dereference_sched */
4273                 struct mod_kallsyms *kallsyms = mod->kallsyms;
4274 
4275                 if (mod->state == MODULE_STATE_UNFORMED)
4276                         continue;
4277                 for (i = 0; i < kallsyms->num_symtab; i++) {
4278                         const Elf_Sym *sym = &kallsyms->symtab[i];
4279 
4280                         if (sym->st_shndx == SHN_UNDEF)
4281                                 continue;
4282 
4283                         ret = fn(data, kallsyms_symbol_name(kallsyms, i),
4284                                  mod, kallsyms_symbol_value(sym));
4285                         if (ret != 0)
4286                                 return ret;
4287                 }
4288         }
4289         return 0;
4290 }
4291 #endif /* CONFIG_KALLSYMS */
4292 
4293 /* Maximum number of characters written by module_flags() */
4294 #define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
4295 
4296 /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
4297 static char *module_flags(struct module *mod, char *buf)
4298 {
4299         int bx = 0;
4300 
4301         BUG_ON(mod->state == MODULE_STATE_UNFORMED);
4302         if (mod->taints ||
4303             mod->state == MODULE_STATE_GOING ||
4304             mod->state == MODULE_STATE_COMING) {
4305                 buf[bx++] = '(';
4306                 bx += module_flags_taint(mod, buf + bx);
4307                 /* Show a - for module-is-being-unloaded */
4308                 if (mod->state == MODULE_STATE_GOING)
4309                         buf[bx++] = '-';
4310                 /* Show a + for module-is-being-loaded */
4311                 if (mod->state == MODULE_STATE_COMING)
4312                         buf[bx++] = '+';
4313                 buf[bx++] = ')';
4314         }
4315         buf[bx] = '\0';
4316 
4317         return buf;
4318 }
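
/*
 * Example (illustrative): for a proprietary, out-of-tree module that is
 * still coming up, module_flags() yields "(PO+)": the taint letters from
 * module_flags_taint() followed by the '+' loading marker.
 */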

#ifdef CONFIG_PROC_FS
/* Called by the /proc file system to return a list of modules. */
static void *m_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&module_mutex);
        return seq_list_start(&modules, *pos);
}

static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
        return seq_list_next(p, &modules, pos);
}

static void m_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&module_mutex);
}

static int m_show(struct seq_file *m, void *p)
{
        struct module *mod = list_entry(p, struct module, list);
        char buf[MODULE_FLAGS_BUF_SIZE];
        void *value;

        /* We always ignore unformed modules. */
        if (mod->state == MODULE_STATE_UNFORMED)
                return 0;

        seq_printf(m, "%s %u",
                   mod->name, mod->init_layout.size + mod->core_layout.size);
        print_unload_info(m, mod);

        /* Informative for users. */
        seq_printf(m, " %s",
                   mod->state == MODULE_STATE_GOING ? "Unloading" :
                   mod->state == MODULE_STATE_COMING ? "Loading" :
                   "Live");
        /* Used by oprofile and other similar tools. */
        value = m->private ? NULL : mod->core_layout.base;
        seq_printf(m, " 0x%px", value);

        /* Taints info */
        if (mod->taints)
                seq_printf(m, " %s", module_flags(mod, buf));

        seq_puts(m, "\n");
        return 0;
}

/*
 * Format: modulename size refcount deps address
 *
 * Where refcount is a number or -, and deps is a comma-separated list
 * of the modules that use this one, or -.
 */
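
/*
 * Example line (editorial addition; names and address are made up):
 *
 *      nf_nat 49152 2 nf_nat_ipv4,nf_nat_ipv6, Live 0xffffffffc0840000
 *
 * When kernel pointers are hidden, m_show() prints a NULL pointer in
 * the address column instead.
 */
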
static const struct seq_operations modules_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = m_show
};

/*
 * This also sets the "private" pointer to non-NULL if the
 * kernel pointers should be hidden (so you can just test
 * "m->private" to see if you should keep the values private).
 *
 * We use the same logic as for /proc/kallsyms.
 */
static int modules_open(struct inode *inode, struct file *file)
{
        int err = seq_open(file, &modules_op);

        if (!err) {
                struct seq_file *m = file->private_data;
                m->private = kallsyms_show_value() ? NULL : (void *)8ul;
        }

        return err;
}
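
/*
 * Note (editorial): the literal 8ul is just an arbitrary non-NULL
 * token; m_show() only tests m->private for NULL-ness and never
 * dereferences it.
 */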

static const struct file_operations proc_modules_operations = {
        .open           = modules_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int __init proc_modules_init(void)
{
        proc_create("modules", 0, NULL, &proc_modules_operations);
        return 0;
}
module_init(proc_modules_init);
#endif

/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
        const struct exception_table_entry *e = NULL;
        struct module *mod;

        preempt_disable();
        mod = __module_address(addr);
        if (!mod)
                goto out;

        if (!mod->num_exentries)
                goto out;

        e = search_extable(mod->extable,
                           mod->num_exentries,
                           addr);
out:
        preempt_enable();

        /*
         * If we found an entry, we are currently executing inside the
         * region it covers, so the module cannot be unloaded underneath
         * us and no refcount is needed.
         */
        return e;
}
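
/*
 * Context (editorial note): the generic helper search_exception_tables()
 * in kernel/extable.c consults the core kernel's table first and falls
 * back to this function for module addresses; arch fault handlers use
 * the returned entry to locate the fixup for the faulting instruction.
 */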

/*
 * is_module_address - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{
        bool ret;

        preempt_disable();
        ret = __module_address(addr) != NULL;
        preempt_enable();

        return ret;
}

/*
 * __module_address - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * the module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
        struct module *mod;

        if (addr < module_addr_min || addr > module_addr_max)
                return NULL;

        module_assert_mutex_or_preempt();

        mod = mod_find(addr);
        if (mod) {
                BUG_ON(!within_module(addr, mod));
                if (mod->state == MODULE_STATE_UNFORMED)
                        mod = NULL;
        }
        return mod;
}
EXPORT_SYMBOL_GPL(__module_address);
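
/*
 * Illustrative caller pattern (editorial addition): pin the module so
 * it remains valid after preemption is re-enabled; the caller drops
 * the reference with module_put() when done.
 */
static struct module *__maybe_unused example_get_module(unsigned long addr)
{
        struct module *mod;

        preempt_disable();
        mod = __module_address(addr);
        if (mod && !try_module_get(mod))
                mod = NULL;
        preempt_enable();

        return mod;
}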

/*
 * is_module_text_address - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module.  See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
        bool ret;

        preempt_disable();
        ret = __module_text_address(addr) != NULL;
        preempt_enable();

        return ret;
}

/*
 * __module_text_address - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * the module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
        struct module *mod = __module_address(addr);
        if (mod) {
                /* Make sure it's within the text section. */
                if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
                    && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
                        mod = NULL;
        }
        return mod;
}
EXPORT_SYMBOL_GPL(__module_text_address);
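
/*
 * Note (editorial): within() is a half-open range test, i.e. it checks
 * base <= addr < base + size, so the code above accepts an address in
 * either the module's init text or its core text region.
 */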

/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
        struct module *mod;
        char buf[MODULE_FLAGS_BUF_SIZE];

        printk(KERN_DEFAULT "Modules linked in:");
        /* Most callers should already have preempt disabled, but make sure */
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
                pr_cont(" %s%s", mod->name, module_flags(mod, buf));
        }
        preempt_enable();
        if (last_unloaded_module[0])
                pr_cont(" [last unloaded: %s]", last_unloaded_module);
        pr_cont("\n");
}
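
/*
 * Example output (editorial addition; module names are made up):
 *
 *      Modules linked in: nf_nat(O-) e1000 [last unloaded: dummy]
 *
 * where "(O-)" marks an out-of-tree module that was being unloaded
 * when the oops hit.
 */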

#ifdef CONFIG_MODVERSIONS
/*
 * Generate the signature for all relevant module structures here.
 * If these change, we don't want to try to parse the module.
 */
void module_layout(struct module *mod,
                   struct modversion_info *ver,
                   struct kernel_param *kp,
                   struct kernel_symbol *ks,
                   struct tracepoint * const *tp)
{
}
EXPORT_SYMBOL(module_layout);
#endif
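
/*
 * Note (editorial): with MODVERSIONS, modpost makes every module
 * reference module_layout(), so the CRC computed over this prototype
 * acts as a version stamp for the structures named in it; if any of
 * them changes layout, the CRC changes and stale modules are rejected
 * at load time.
 */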
