arch/powerpc/mm/nohash/mmu_context.c


DEFINITIONS

This source file includes the following definitions:
  1. steal_context_smp
  2. steal_all_contexts
  3. steal_context_up
  4. context_check_map
  5. context_check_map
  6. switch_mmu_context
  7. init_new_context
  8. destroy_context
  9. mmu_ctx_cpu_prepare
  10. mmu_ctx_cpu_dead
  11. mmu_context_init

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as the 8xx, 4xx, BookE, etc.
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from previous arch/powerpc/mm/mmu_context.c
 *  and arch/powerpc/include/asm/mmu_context.h
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT   31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to be always compiled in these
 * days, and it would generate far too much output.
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)        printk(KERN_DEBUG args)
#define pr_hardcont(args...)    printk(KERN_CONT args)
#else
#define pr_hard(args...)        do { } while (0)
#define pr_hardcont(args...)    do { } while (0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include <mm/mmu_decl.h>

/*
 * The MPC8xx has only 16 contexts. We rotate through them on each task switch.
 * A better way would be to keep track of tasks that own contexts, and implement
 * an LRU usage. That way very active tasks don't always have to pay the TLB
 * reload overhead. The kernel pages are mapped shared, so the kernel can run on
 * behalf of any task that makes a kernel entry. Shared does not mean they are
 * not protected, just that the ASID comparison is not performed. -- Dan
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these as a way of
 * "switching" contexts. If the TID of the TLB is zero, the PID/TID comparison
 * is disabled, so we can use a TID of zero to represent all kernel pages as
 * shared among all contexts. -- Dan
 *
 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We should
 * normally never have to steal, though the facility is present if needed.
 * -- BenH
 */
#define FIRST_CONTEXT 1
#ifdef DEBUG_CLAMP_LAST_CONTEXT
#define LAST_CONTEXT DEBUG_CLAMP_LAST_CONTEXT
#elif defined(CONFIG_PPC_8xx)
#define LAST_CONTEXT 16
#elif defined(CONFIG_PPC_47x)
#define LAST_CONTEXT 65535
#else
#define LAST_CONTEXT 255
#endif
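
/*
 * Context ids in the range FIRST_CONTEXT..LAST_CONTEXT are handed out to
 * mms; ids below FIRST_CONTEXT are pre-marked as used in mmu_context_init()
 * and are therefore never allocated.
 */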

static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
#ifdef CONFIG_SMP
static unsigned long *stale_map[NR_CPUS];
#endif
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);

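/*
 * Size in bytes of a bitmap with one bit per context id 0..LAST_CONTEXT,
 * rounded up to a whole number of unsigned longs.  For example, with
 * LAST_CONTEXT == 255 and 64-bit longs this is 8 * (255 / 64 + 1) = 32
 * bytes, covering bits 0..255.
 */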
#define CTX_MAP_SIZE    \
        (sizeof(unsigned long) * (LAST_CONTEXT / BITS_PER_LONG + 1))


/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
        struct mm_struct *mm;
        unsigned int cpu, max, i;

        max = LAST_CONTEXT - FIRST_CONTEXT;

        /* Attempt to free next_context first and then loop until we manage */
        while (max--) {
                /* Pick up the victim mm */
                mm = context_mm[id];

                /* We have a candidate victim, check if it's active, on SMP
                 * we cannot steal active contexts
                 */
                if (mm->context.active) {
                        id++;
                        if (id > LAST_CONTEXT)
                                id = FIRST_CONTEXT;
                        continue;
                }
                pr_hardcont(" | steal %d from 0x%p", id, mm);

                /* Mark this mm as having no context anymore */
                mm->context.id = MMU_NO_CONTEXT;

                /* Mark it stale on all CPUs that used this mm. For threaded
                 * implementations, we set it on all threads on each core
                 * represented in the mask. A future implementation will use
                 * a core map instead but this will do for now.
                 */
                for_each_cpu(cpu, mm_cpumask(mm)) {
                        for (i = cpu_first_thread_sibling(cpu);
                             i <= cpu_last_thread_sibling(cpu); i++) {
                                if (stale_map[i])
                                        __set_bit(id, stale_map[i]);
                        }
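                        /* Skip the outer iterator past the sibling threads
                         * we just covered; for_each_cpu() will advance cpu
                         * from here on the next iteration.
                         */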
                        cpu = i - 1;
                }
                return id;
        }

        /* This will happen if you have more CPUs than available contexts,
         * all we can do here is wait a bit and try again
         */
        raw_spin_unlock(&context_lock);
        cpu_relax();
        raw_spin_lock(&context_lock);

        /* This will cause the caller to try again */
        return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */

static unsigned int steal_all_contexts(void)
{
        struct mm_struct *mm;
#ifdef CONFIG_SMP
        int cpu = smp_processor_id();
#endif
        unsigned int id;

        for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
                /* Pick up the victim mm */
                mm = context_mm[id];

                pr_hardcont(" | steal %d from 0x%p", id, mm);

                /* Mark this mm as having no context anymore */
                mm->context.id = MMU_NO_CONTEXT;
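                /* Keep FIRST_CONTEXT marked as used in the map: it is the
                 * id handed back to the caller below, so it must not be
                 * treated as free.
                 */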
                if (id != FIRST_CONTEXT) {
                        context_mm[id] = NULL;
                        __clear_bit(id, context_map);
#ifdef DEBUG_MAP_CONSISTENCY
                        mm->context.active = 0;
#endif
                }
#ifdef CONFIG_SMP
                __clear_bit(id, stale_map[cpu]);
#endif
        }

        /* Flush the TLB for all contexts (not to be used on SMP) */
        _tlbil_all();

        nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT;

        return FIRST_CONTEXT;
}

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
        struct mm_struct *mm;
#ifdef CONFIG_SMP
        int cpu = smp_processor_id();
#endif

        /* Pick up the victim mm */
        mm = context_mm[id];

        pr_hardcont(" | steal %d from 0x%p", id, mm);

        /* Flush the TLB for that context */
        local_flush_tlb_mm(mm);

        /* Mark this mm as having no context anymore */
        mm->context.id = MMU_NO_CONTEXT;

        /* XXX This clear should ultimately be part of local_flush_tlb_mm */
#ifdef CONFIG_SMP
        __clear_bit(id, stale_map[cpu]);
#endif

        return id;
}

#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
        unsigned int id, nrf, nact;

        nrf = nact = 0;
        for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
                int used = test_bit(id, context_map);
                if (!used)
                        nrf++;
                if (used != (context_mm[id] != NULL))
                        pr_err("MMU: Context %d is %s and MM is %p !\n",
                               id, used ? "used" : "free", context_mm[id]);
                if (context_mm[id] != NULL)
                        nact += context_mm[id]->context.active;
        }
        if (nrf != nr_free_contexts) {
                pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
                       nr_free_contexts, nrf);
                nr_free_contexts = nrf;
        }
        if (nact > num_online_cpus())
                pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
                       nact, num_online_cpus());
        if (FIRST_CONTEXT > 0 && !test_bit(0, context_map))
                pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif

void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
                        struct task_struct *tsk)
{
        unsigned int id;
#ifdef CONFIG_SMP
        unsigned int i, cpu = smp_processor_id();
#endif
        unsigned long *map;

        /* No lockless fast path .. yet */
        raw_spin_lock(&context_lock);

        pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
                cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
        /* Mark us active and the previous one not anymore */
        next->context.active++;
        if (prev) {
                pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
                WARN_ON(prev->context.active < 1);
                prev->context.active--;
        }

 again:
#endif /* CONFIG_SMP */

        /* If we already have a valid assigned context, skip all that */
        id = next->context.id;
        if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
                if (context_mm[id] != next)
                        pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
                               next, id, id, context_mm[id]);
#endif
                goto ctxt_ok;
        }

        /* We really don't have a context, let's try to acquire one */
        id = next_context;
        if (id > LAST_CONTEXT)
                id = FIRST_CONTEXT;
        map = context_map;

        /* No more free contexts, let's try to steal one */
        if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
                if (num_online_cpus() > 1) {
                        id = steal_context_smp(id);
                        if (id == MMU_NO_CONTEXT)
                                goto again;
                        goto stolen;
                }
#endif /* CONFIG_SMP */
                if (IS_ENABLED(CONFIG_PPC_8xx))
                        id = steal_all_contexts();
                else
                        id = steal_context_up(id);
                goto stolen;
        }
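        /* We did not have to steal: consume one of the free contexts.
         * (The steal paths above jump straight to "stolen" because a
         * stolen id was never in the free pool.)
         */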
        nr_free_contexts--;

        /* We know there's at least one free context, try to find it */
        while (__test_and_set_bit(id, map)) {
                id = find_next_zero_bit(map, LAST_CONTEXT + 1, id);
                if (id > LAST_CONTEXT)
                        id = FIRST_CONTEXT;
        }
 stolen:
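        /* Remember where to start searching next time; the wrap back to
         * FIRST_CONTEXT is handled above when next_context is read.
         */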
        next_context = id + 1;
        context_mm[id] = next;
        next->context.id = id;
        pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

        context_check_map();
 ctxt_ok:

        /* If that context got marked stale on this CPU, then flush the
         * local TLB for it and unmark it before we use it
         */
#ifdef CONFIG_SMP
        if (test_bit(id, stale_map[cpu])) {
                pr_hardcont(" | stale flush %d [%d..%d]",
                            id, cpu_first_thread_sibling(cpu),
                            cpu_last_thread_sibling(cpu));

                local_flush_tlb_mm(next);

                /* XXX This clear should ultimately be part of local_flush_tlb_mm */
                for (i = cpu_first_thread_sibling(cpu);
                     i <= cpu_last_thread_sibling(cpu); i++) {
                        if (stale_map[i])
                                __clear_bit(id, stale_map[i]);
                }
        }
#endif

        /* Flick the MMU and release lock */
        pr_hardcont(" -> %d\n", id);
        set_context(id, next->pgd);
        raw_spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
        pr_hard("initing context for mm @%p\n", mm);

        /*
         * We have MMU_NO_CONTEXT set to be ~0. Hence check
         * explicitly against context.id == 0. This ensures that we properly
         * initialize context slice details for newly allocated mm's (which will
         * have id == 0) and don't alter context slice inherited via fork (which
         * will have id != 0).
         */
        if (mm->context.id == 0)
                slice_init_new_context_exec(mm);
        mm->context.id = MMU_NO_CONTEXT;
        mm->context.active = 0;
        pte_frag_set(&mm->context, NULL);
        return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
        unsigned long flags;
        unsigned int id;

        if (mm->context.id == MMU_NO_CONTEXT)
                return;

        WARN_ON(mm->context.active != 0);

        raw_spin_lock_irqsave(&context_lock, flags);
        id = mm->context.id;
        if (id != MMU_NO_CONTEXT) {
                __clear_bit(id, context_map);
                mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
                mm->context.active = 0;
#endif
                context_mm[id] = NULL;
                nr_free_contexts++;
        }
        raw_spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP
static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
        /* We don't touch the boot CPU's map, it's allocated at boot and
         * kept around forever
         */
        if (cpu == boot_cpuid)
                return 0;

        pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
        stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
        return 0;
}

static int mmu_ctx_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        if (cpu == boot_cpuid)
                return 0;

        pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
        kfree(stale_map[cpu]);
        stale_map[cpu] = NULL;

        /* We also clear the cpu_vm_mask bits of CPUs going away */
        clear_tasks_mm_cpumask(cpu);
#endif
        return 0;
}

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
        /* Mark init_mm as being active on all possible CPUs since
         * we'll get called with prev == init_mm the first time
         * we schedule on a given CPU
         */
        init_mm.context.active = NR_CPUS;

        /*
         * Allocate the maps used by context management
         */
        context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
        if (!context_map)
                panic("%s: Failed to allocate %zu bytes\n", __func__,
                      CTX_MAP_SIZE);
        context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
                                    SMP_CACHE_BYTES);
        if (!context_mm)
                panic("%s: Failed to allocate %zu bytes\n", __func__,
                      sizeof(void *) * (LAST_CONTEXT + 1));
#ifdef CONFIG_SMP
        stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
        if (!stale_map[boot_cpuid])
                panic("%s: Failed to allocate %zu bytes\n", __func__,
                      CTX_MAP_SIZE);

        cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
                                  "powerpc/mmu/ctx:prepare",
                                  mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
#endif

        printk(KERN_INFO
               "MMU: Allocated %zu bytes of context maps for %d contexts\n",
               2 * CTX_MAP_SIZE + (sizeof(void *) * (LAST_CONTEXT + 1)),
               LAST_CONTEXT - FIRST_CONTEXT + 1);

        /*
         * Some processors have too few contexts to reserve one for
         * init_mm, and require using context 0 for a normal task.
         * Other processors reserve the use of context zero for the kernel.
         * This code assumes FIRST_CONTEXT < 32.
         */
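        /* With the default FIRST_CONTEXT == 1, (1 << FIRST_CONTEXT) - 1 == 1,
         * i.e. only bit 0 is set and context 0 is permanently reserved.
         */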
        context_map[0] = (1 << FIRST_CONTEXT) - 1;
        next_context = FIRST_CONTEXT;
        nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
}
