root/kernel/user.c


DEFINITIONS

This source file includes the following definitions:
  1. uid_hash_insert
  2. uid_hash_remove
  3. uid_hash_find
  4. free_user
  5. find_user
  6. free_uid
  7. alloc_uid
  8. uid_cache_init

// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ? (presumably init_ipc_ns, which also starts out
 * owned by init_user_ns, matching the ATOMIC_INIT(3) below)
 */
struct user_namespace init_user_ns = {
        .uid_map = {
                .nr_extents = 1,
                {
                        .extent[0] = {
                                .first = 0,
                                .lower_first = 0,
                                .count = 4294967295U,
                        },
                },
        },
        .gid_map = {
                .nr_extents = 1,
                {
                        .extent[0] = {
                                .first = 0,
                                .lower_first = 0,
                                .count = 4294967295U,
                        },
                },
        },
        .projid_map = {
                .nr_extents = 1,
                {
                        .extent[0] = {
                                .first = 0,
                                .lower_first = 0,
                                .count = 4294967295U,
                        },
                },
        },
        .count = ATOMIC_INIT(3),
        .owner = GLOBAL_ROOT_UID,
        .group = GLOBAL_ROOT_GID,
        .ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
        .ns.ops = &userns_operations,
#endif
        .flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
        .keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
        .keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
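
/*
 * Each extent above maps the id range [first, first + count) in this
 * namespace onto [lower_first, lower_first + count) in the parent.
 * With first == lower_first == 0 and count == 4294967295U, the initial
 * namespace gets the identity mapping over every valid id.  A rough
 * sketch of the lookup (the real one lives in kernel/user_namespace.c):
 *
 *	if (id >= extent->first && id - extent->first < extent->count)
 *		mapped = extent->lower_first + (id - extent->first);
 */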

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
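
/*
 * Worked example (illustrative): with CONFIG_BASE_SMALL unset,
 * UIDHASH_BITS is 7, so UIDHASH_SZ is 128 and UIDHASH_MASK is 127.
 * For uid 1000:
 *
 *	((1000 >> 7) + 1000) & 127 == (7 + 1000) & 127 == 111
 *
 * so uidhashentry() selects bucket 111 of uidhash_table.
 */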

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
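
/*
 * Illustrative sketch of the constraint described above (not code from
 * this file): had free_uid() used the _bh lock variants, a caller
 * running with interrupts disabled would be broken:
 *
 *	local_irq_save(flags);
 *	free_uid(up);
 *	local_irq_restore(flags);
 *
 * spin_unlock_bh() inside free_uid() would call local_bh_enable(),
 * which may run softirq callbacks that re-enable interrupts behind the
 * caller's back.  Hence the irqsave/irqrestore variants below.
 */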

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
        .__count	= REFCOUNT_INIT(1),
        .processes	= ATOMIC_INIT(1),
        .sigpending	= ATOMIC_INIT(0),
        .locked_shm	= 0,
        .uid		= GLOBAL_ROOT_UID,
        .ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;

        hlist_for_each_entry(user, hashent, uidhash_node) {
                if (uid_eq(user->uid, uid)) {
                        refcount_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
        __releases(&uidhash_lock)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}
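
/*
 * Example usage (illustrative, not from this file): the reference
 * returned by find_user() must be balanced with free_uid().
 *
 *	struct user_struct *up = find_user(make_kuid(&init_user_ns, 1000));
 *	if (up) {
 *		int nproc = atomic_read(&up->processes);
 *		...
 *		free_uid(up);
 *	}
 */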

void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
                free_user(up, flags);
}
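
/*
 * refcount_dec_and_lock_irqsave() above behaves roughly like this
 * open-coded sequence (illustrative sketch):
 *
 *	if (refcount_dec_not_one(&up->__count))
 *		return;
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	if (refcount_dec_and_test(&up->__count))
 *		free_user(up, flags);
 *	else
 *		spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * i.e. the lock is only taken when the count may drop to zero, and
 * free_user() releases it on the free path.
 */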

struct user_struct *alloc_uid(kuid_t uid)
{
        struct hlist_head *hashent = uidhashentry(uid);
        struct user_struct *up, *new;

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        return NULL;

                new->uid = uid;
                refcount_set(&new->__count, 1);
                ratelimit_state_init(&new->ratelimit, HZ, 100);
                ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        return up;
}
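
/*
 * Example usage (illustrative, modelled on set_user() in kernel/sys.c):
 * take a reference on the target user and drop the one being replaced.
 *
 *	struct user_struct *new_user = alloc_uid(kuid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	...
 *	free_uid(old_user);
 */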

static int __init uid_cache_init(void)
{
        int n;

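        /*
         * SLAB_PANIC makes kmem_cache_create() panic if the cache cannot
         * be created, so the result is not checked for NULL below.
         */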
        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}
subsys_initcall(uid_cache_init);
