root/arch/riscv/mm/context.c

DEFINITIONS

This source file includes the following definitions:
  1. flush_icache_deferred
  2. switch_mm

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/mm.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/*
 * When necessary, performs a deferred icache flush for the given MM
 * context on the local CPU.  RISC-V has no direct mechanism for
 * instruction cache shootdowns, so instead we send an IPI informing the
 * remote harts that they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we
 * avoid the IPIs for harts that are not currently executing an MM
 * context and instead schedule a deferred local instruction cache flush
 * to be performed before execution resumes on each hart.  This function
 * performs that deferred local instruction cache flush, which
 * implicitly applies only to the current hart.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
        unsigned int cpu = smp_processor_id();
        cpumask_t *mask = &mm->context.icache_stale_mask;

        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
                /*
                 * Ensure the remote hart's writes are visible to this hart.
                 * This pairs with a barrier in flush_icache_mm().
                 */
                smp_mb();
                local_flush_icache_all();
        }
#endif
}

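The smp_mb() above pairs with a barrier on the sender side in
flush_icache_mm(), which lives in arch/riscv/mm/cacheflush.c rather than
in this file.  The sketch below illustrates the stale-mask protocol from
the sender's point of view; it is a minimal illustration of the scheme
described in the comment above, not the kernel's exact code, and
flush_icache_mm_sketch() is a hypothetical name.

/*
 * Sender-side sketch: order the stores that produced the new
 * instructions, mark every hart's icache stale, flush this hart now,
 * and IPI only the harts currently executing this mm.  All other harts
 * flush lazily via flush_icache_deferred() in switch_mm().
 */
static void ipi_remote_fence_i(void *info)
{
        local_flush_icache_all();
}

static void flush_icache_mm_sketch(struct mm_struct *mm)
{
        unsigned int cpu = get_cpu();   /* also disables preemption */
        cpumask_t others;

        /*
         * Make the new instructions visible before any hart can observe
         * its stale bit; pairs with the smp_mb() in
         * flush_icache_deferred() above.
         */
        smp_mb();
        cpumask_setall(&mm->context.icache_stale_mask);

        /* This hart flushes right away, so clear its stale bit. */
        cpumask_clear_cpu(cpu, &mm->context.icache_stale_mask);
        local_flush_icache_all();

        /* fence.i, via IPI, on the harts currently running this mm. */
        cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
        if (!cpumask_empty(&others))
                on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);

        put_cpu();
}

Harts that receive the IPI keep their stale bit set; the extra deferred
flush on their next switch_mm() into this mm is redundant but harmless.
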
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        struct task_struct *task)
{
        unsigned int cpu;

        if (unlikely(prev == next))
                return;

        /*
         * Mark the current MM context as inactive, and the next as
         * active.  This is used, at a minimum, by the icache flushing
         * routines to determine which harts should be flushed.
         */
        cpu = smp_processor_id();

        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));

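        /*
         * Install the next context's page-table root in satp and flush
         * the local TLB.  No ASID is assigned here (the ASID field of
         * satp is left zero), so a full local flush is needed to drop
         * stale translations left over from 'prev'.
         */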
        csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
        local_flush_tlb_all();

        flush_icache_deferred(next);
}
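
For reference, the satp value written in switch_mm() packs the physical
page number of the root page table together with the translation mode,
with the ASID field left at zero.  Below is a minimal standalone sketch
of that encoding, assuming RV64 with Sv39 (SATP_MODE in the kernel
depends on configuration); make_satp() and the sample address are
hypothetical.

#include <stdint.h>
#include <stdio.h>

/*
 * RV64 satp layout (RISC-V privileged spec):
 *   MODE [63:60]  translation scheme, 8 = Sv39
 *   ASID [59:44]  address-space ID, unused above
 *   PPN  [43:0]   physical page number of the root page table
 */
#define SATP_MODE_SV39  (8ULL << 60)
#define SATP_PPN_MASK   ((1ULL << 44) - 1)

/*
 * Mirrors virt_to_pfn(next->pgd) | SATP_MODE for 4 KiB pages, except
 * that it starts from a physical address; the kernel's virt_to_pfn()
 * first translates a kernel virtual address to its physical frame.
 */
static uint64_t make_satp(uint64_t root_pa)
{
        return SATP_MODE_SV39 | ((root_pa >> 12) & SATP_PPN_MASK);
}

int main(void)
{
        /* Hypothetical root page-table physical address. */
        printf("satp = %#llx\n",
               (unsigned long long)make_satp(0x80201000ULL));
        return 0;
}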
