arch/x86/um/tls_32.c


DEFINITIONS

This source file includes the following definitions.
  1. do_set_thread_area
  2. do_get_thread_area
  3. get_free_idx
  4. clear_user_desc
  5. load_TLS
  6. needs_TLS_update
  7. clear_flushed_tls
  8. arch_switch_tls
  9. set_tls_entry
  10. arch_set_tls
  11. get_tls_entry
  12. SYSCALL_DEFINE1 (set_thread_area)
  13. ptrace_set_thread_area
  14. SYSCALL_DEFINE1 (get_thread_area)
  15. ptrace_get_thread_area
  16. __setup_host_supports_tls

/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <asm/ptrace-abi.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

/*
 * If needed we can detect when it's uninitialized.
 *
 * These are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

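/*
 * Install a TLS descriptor in the host process backing the current CPU,
 * via PTRACE_SET_THREAD_AREA on userspace_pid[cpu].
 */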
int do_set_thread_area(struct user_desc *info)
{
        int ret;
        u32 cpu;

        cpu = get_cpu();
        ret = os_set_thread_area(info, userspace_pid[cpu]);
        put_cpu();

        if (ret)
                printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
                       "index = %d\n", ret, info->entry_number);

        return ret;
}

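/*
 * Read a TLS descriptor back from the host process backing the current CPU,
 * via PTRACE_GET_THREAD_AREA on userspace_pid[cpu].
 */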
int do_get_thread_area(struct user_desc *info)
{
        int ret;
        u32 cpu;

        cpu = get_cpu();
        ret = os_get_thread_area(info, userspace_pid[cpu]);
        put_cpu();

        if (ret)
                printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
                       "index = %d\n", ret, info->entry_number);

        return ret;
}

/*
 * get_free_idx: get a yet unused TLS descriptor index, for sys_set_thread_area.
 * XXX: Consider leaving one free slot for glibc usage in the first place. This
 * must be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct* task)
{
        struct thread_struct *t = &task->thread;
        int idx;

        if (!t->arch.tls_array)
                return GDT_ENTRY_TLS_MIN;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (!t->arch.tls_array[idx].present)
                        return idx + GDT_ENTRY_TLS_MIN;
        return -ESRCH;
}

static inline void clear_user_desc(struct user_desc* info)
{
        /* Postcondition: LDT_empty(info) returns true. */
        memset(info, 0, sizeof(*info));

        /*
         * Check the LDT_empty or the i386 sys_get_thread_area code - we indeed
         * obtain an empty user_desc.
         */
        info->read_exec_only = 1;
        info->seg_not_present = 1;
}

#define O_FORCE 1

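/*
 * Flush the task's shadow tls_array into the host process. With O_FORCE,
 * entries are reloaded even if already marked flushed; otherwise only
 * unflushed entries are written out.
 */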
static int load_TLS(int flags, struct task_struct *to)
{
        int ret = 0;
        int idx;

        for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
                struct uml_tls_struct* curr =
                        &to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

                /*
                 * If a non-present entry was never flushed, clear it locally
                 * and flush it to the host, which will clear it there too.
                 */
                if (!curr->present) {
                        if (!curr->flushed) {
                                clear_user_desc(&curr->tls);
                                curr->tls.entry_number = idx;
                        } else {
                                WARN_ON(!LDT_empty(&curr->tls));
                                continue;
                        }
                }

                if (!(flags & O_FORCE) && curr->flushed)
                        continue;

                ret = do_set_thread_area(&curr->tls);
                if (ret)
                        goto out;

                curr->flushed = 1;
        }
out:
        return ret;
}

/*
 * Check whether the new process needs a TLS flush, i.e. whether any descriptor
 * has not yet been flushed to the host.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
        int i;
        int ret = 0;

        for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
                struct uml_tls_struct* curr =
                        &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

                /*
                 * Can't test curr->present, we may need to clear a descriptor
                 * which had a value.
                 */
                if (curr->flushed)
                        continue;
                ret = 1;
                break;
        }
        return ret;
}

/*
 * On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
        int i;

        for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
                struct uml_tls_struct* curr =
                        &task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

                /*
                 * Still correct to do this; if it wasn't present on the host,
                 * it will remain as flushed as it was.
                 */
                if (!curr->present)
                        continue;

                curr->flushed = 0;
        }
}

/*
 * In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
 * common host process. So this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support) this won't be needed.
 *
 * And this will not need to be used when (and if) we add support for the host
 * SKAS patch.
 */

int arch_switch_tls(struct task_struct *to)
{
        if (!host_supports_tls)
                return 0;

        /*
         * We have no need whatsoever to switch TLS for kernel threads; beyond
         * that, that would also result in us calling os_set_thread_area with
         * userspace_pid[cpu] == 0, which gives an error.
         */
        if (likely(to->mm))
                return load_TLS(O_FORCE, to);

        return 0;
}

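/*
 * Record a descriptor in the task's shadow tls_array; "flushed" says whether
 * it has already been written to the host.
 */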
static int set_tls_entry(struct task_struct* task, struct user_desc *info,
                         int idx, int flushed)
{
        struct thread_struct *t = &task->thread;

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
        t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

        return 0;
}

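/*
 * Set up the child's TLS from the user_desc pointer passed via clone(); the
 * entry is recorded as not flushed, so the first switch to the child loads it
 * on the host.
 */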
int arch_set_tls(struct task_struct *new, unsigned long tls)
{
        struct user_desc info;
        int idx, ret = -EFAULT;

        if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
                goto out;

        ret = -EINVAL;
        if (LDT_empty(&info))
                goto out;

        idx = info.entry_number;

        ret = set_tls_entry(new, &info, idx, 0);
out:
        return ret;
}

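/*
 * Read a descriptor from the task's shadow tls_array; if the slot is not
 * present, return an empty descriptor, just as a GDT cleared at boot would.
 */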
/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
                         int idx)
{
        struct thread_struct *t = &task->thread;

        if (!t->arch.tls_array)
                goto clear;

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
                goto clear;

        *info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
        /*
         * Temporary debugging check, to make sure that things have been
         * flushed. This could be triggered if load_TLS() failed.
         */
        if (unlikely(task == current &&
                     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
                printk(KERN_ERR "get_tls_entry: task with pid %d got here "
                                "without flushed TLS.\n", current->pid);
        }

        return 0;
clear:
        /*
         * When the TLS entry has not been set, the values read back to the
         * user from the tls_array are 0 (because it's cleared at boot, see
         * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
         */
        clear_user_desc(info);
        info->entry_number = idx;
        goto out;
}

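/*
 * sys_set_thread_area: if entry_number is -1, pick a free slot and report it
 * back to the caller, then install the descriptor on the host and record it
 * in the shadow tls_array as already flushed.
 */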
SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, user_desc)
{
        struct user_desc info;
        int idx, ret;

        if (!host_supports_tls)
                return -ENOSYS;

        if (copy_from_user(&info, user_desc, sizeof(info)))
                return -EFAULT;

        idx = info.entry_number;

        if (idx == -1) {
                idx = get_free_idx(current);
                if (idx < 0)
                        return idx;
                info.entry_number = idx;
                /* Tell the user which slot we chose for him. */
                if (put_user(idx, &user_desc->entry_number))
                        return -EFAULT;
        }

        ret = do_set_thread_area(&info);
        if (ret)
                return ret;
        return set_tls_entry(current, &info, idx, 1);
}

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: unlike on i386, no error handling is done on the deferred load; the
 * only possible errors there are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
                           struct user_desc __user *user_desc)
{
        struct user_desc info;

        if (!host_supports_tls)
                return -EIO;

        if (copy_from_user(&info, user_desc, sizeof(info)))
                return -EFAULT;

        return set_tls_entry(child, &info, idx, 0);
}

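/*
 * sys_get_thread_area: read the descriptor whose entry_number the caller
 * supplies, from the current task's shadow tls_array.
 */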
SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, user_desc)
{
        struct user_desc info;
        int idx, ret;

        if (!host_supports_tls)
                return -ENOSYS;

        if (get_user(idx, &user_desc->entry_number))
                return -EFAULT;

        ret = get_tls_entry(current, &info, idx);
        if (ret < 0)
                goto out;

        if (copy_to_user(user_desc, &info, sizeof(info)))
                ret = -EFAULT;

out:
        return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
                struct user_desc __user *user_desc)
{
        struct user_desc info;
        int ret;

        if (!host_supports_tls)
                return -EIO;

        ret = get_tls_entry(child, &info, idx);
        if (ret < 0)
                goto out;

        if (copy_to_user(user_desc, &info, sizeof(info)))
                ret = -EFAULT;
out:
        return ret;
}

/*
 * This code is really i386-only, but it detects and logs x86_64 GDT indexes
 * if a 32-bit UML is running on a 64-bit host.
 */
static int __init __setup_host_supports_tls(void)
{
        check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
        if (host_supports_tls) {
                printk(KERN_INFO "Host TLS support detected\n");
                printk(KERN_INFO "Detected host type: ");
                switch (host_gdt_entry_tls_min) {
                case GDT_ENTRY_TLS_MIN_I386:
                        printk(KERN_CONT "i386");
                        break;
                case GDT_ENTRY_TLS_MIN_X86_64:
                        printk(KERN_CONT "x86_64");
                        break;
                }
                printk(KERN_CONT " (GDT indexes %d to %d)\n",
                       host_gdt_entry_tls_min,
                       host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
        } else
                printk(KERN_ERR "  Host TLS support NOT detected! "
                                "TLS support inside UML will not work\n");
        return 0;
}

__initcall(__setup_host_supports_tls);
