arch/nds32/mm/proc.c


DEFINITIONS

This source file includes the following definitions:
  1. va_kernel_present
  2. va_present
  3. va_readable
  4. va_writable
  5. cpu_icache_inval_all
  6. cpu_dcache_inval_all
  7. dcache_wb_all_level
  8. cpu_dcache_wb_all
  9. cpu_dcache_wbinval_all
  10. cpu_icache_inval_page
  11. cpu_dcache_inval_page
  12. cpu_dcache_wb_page
  13. cpu_dcache_wbinval_page
  14. cpu_cache_wbinval_page
  15. cpu_icache_inval_range
  16. cpu_dcache_inval_range
  17. cpu_dcache_wb_range
  18. cpu_dcache_wbinval_range
  19. cpu_cache_wbinval_range
  20. cpu_cache_wbinval_range_check
  21. cpu_l2cache_op
  22. cpu_dma_wb_range
  23. cpu_dma_inval_range
  24. cpu_dma_wbinval_range
  25. cpu_proc_init
  26. cpu_proc_fin
  27. cpu_do_idle
  28. cpu_reset
  29. cpu_switch_mm

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/nds32.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/l2_cache.h>
#include <nds32_intrinsic.h>

#include <asm/cache_info.h>
extern struct cache_info L1_cache_info[2];

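/*
 * Walk the kernel page tables and return the PTE that maps @addr if
 * it is present, or 0 otherwise.
 */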
pte_t va_kernel_present(unsigned long addr)
{
        pmd_t *pmd;
        pte_t *ptep, pte;

        pmd = pmd_offset(pgd_offset_k(addr), addr);
        if (!pmd_none(*pmd)) {
                ptep = pte_offset_map(pmd, addr);
                pte = *ptep;
                if (pte_present(pte))
                        return pte;
        }
        return 0;
}

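/*
 * Walk @mm's page tables and return the PTE that maps @addr if it is
 * present, or 0 otherwise.
 */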
pte_t va_present(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd)) {
                                ptep = pte_offset_map(pmd, addr);
                                pte = *ptep;
                                if (pte_present(pte))
                                        return pte;
                        }
                }
        }
        return 0;
}

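/*
 * Return 1 if @addr was readable in the context recorded by @regs,
 * 0 otherwise.
 */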
int va_readable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_read(pte))
                        ret = 1;
        } else {
                /* superuser mode is always readable, so we only need
                 * to check whether the page is present */
                return !!va_kernel_present(addr);
        }
        return ret;
}

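/*
 * Return 1 if @addr was writable in the context recorded by @regs,
 * 0 otherwise.
 */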
int va_writable(struct pt_regs *regs, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        pte_t pte;
        int ret = 0;

        if (user_mode(regs)) {
                /* user mode */
                pte = va_present(mm, addr);
                if (pte && pte_write(pte))
                        ret = 1;
        } else {
                /* superuser mode */
                pte = va_kernel_present(addr);
                if (pte && pte_kernel_write(pte))
                        ret = 1;
        }
        return ret;
}

/*
 * All
 */
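/*
 * Invalidate the entire L1 instruction cache by set/way index, four
 * cache lines per loop iteration, then serialize with an isb.
 */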
void cpu_icache_inval_all(void)
{
        unsigned long end, line_size;

        line_size = L1_cache_info[ICACHE].line_size;
        end = line_size * L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].sets;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
        } while (end > 0);
        __nds32__isb();
}

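/* Invalidate the entire L1 data cache with a single CCTL operation. */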
void cpu_dcache_inval_all(void)
{
        __nds32__cctl_l1d_invalall();
}

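/*
 * Write back dirty data at every cache level: flush the L1 D-cache,
 * wait until the write-back has drained into the L2 controller, then
 * write back the whole L2 cache.
 */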
#ifdef CONFIG_CACHE_L2
void dcache_wb_all_level(void)
{
        unsigned long flags, cmd;
        local_irq_save(flags);
        __nds32__cctl_l1d_wball_alvl();
        /* Section 1: ensure sections 2 and 3 only execute after the
         * L1 write-back above has completed */
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);

        /* Section 2: confirm the all-level write-back is done in the
         * CPU and the L2C */
        cmd = CCTL_CMD_L2_SYNC;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();

        /* Section 3: write back the whole L2 cache */
        cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
        L2_CMD_RDY();
        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
        L2_CMD_RDY();
        __nds32__msync_all();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dcache_wb_all_level);
#endif

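/*
 * Write back the entire L1 data cache; the dummy CCTL index read
 * serves as a completion barrier for the write-back.
 */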
void cpu_dcache_wb_all(void)
{
        __nds32__cctl_l1d_wball_one_lvl();
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_dcache_wbinval_all(void)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long flags;
        local_irq_save(flags);
#endif
        cpu_dcache_wb_all();
        cpu_dcache_inval_all();
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        local_irq_restore(flags);
#endif
}

/*
 * Page
 */
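/*
 * Invalidate one PAGE_SIZE region of the I-cache by virtual address,
 * four cache lines per loop iteration.
 */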
void cpu_icache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[ICACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__isb();
}

void cpu_dcache_inval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
}

void cpu_dcache_wb_page(unsigned long start)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
                end -= line_size;
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_page(unsigned long start)
{
        unsigned long line_size, end;

        line_size = L1_cache_info[DCACHE].line_size;
        end = start + PAGE_SIZE;

        do {
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
                end -= line_size;
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
        } while (end != start);
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

void cpu_cache_wbinval_page(unsigned long page, int flushi)
{
        cpu_dcache_wbinval_page(page);
        if (flushi)
                cpu_icache_inval_page(page);
}

/*
 * Range
 */
void cpu_icache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[ICACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__isb();
}

void cpu_dcache_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
}

void cpu_dcache_wb_range(unsigned long start, unsigned long end)
{
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
#endif
}

void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;

        line_size = L1_cache_info[DCACHE].line_size;

        while (end > start) {
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
                __asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
#endif
                __asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
                start += line_size;
        }
        __nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
}

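/*
 * Write back and invalidate [start, end) in the D-cache and, when
 * @flushi is set, invalidate it in the I-cache as well.  The range is
 * aligned separately to each cache's line size before the per-line
 * operations run.
 */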
void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
{
        unsigned long line_size, align_start, align_end;

        line_size = L1_cache_info[DCACHE].line_size;
        align_start = start & ~(line_size - 1);
        align_end = (end + line_size - 1) & ~(line_size - 1);
        cpu_dcache_wbinval_range(align_start, align_end);

        if (flushi) {
                line_size = L1_cache_info[ICACHE].line_size;
                align_start = start & ~(line_size - 1);
                align_end = (end + line_size - 1) & ~(line_size - 1);
                cpu_icache_inval_range(align_start, align_end);
        }
}

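/*
 * As cpu_cache_wbinval_range(), but for a user range that may be only
 * partially mapped: ranges larger than eight pages fall back to a
 * whole-cache operation, and each page is checked with va_present()
 * first so that unmapped pages are skipped.
 */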
void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end,
                                   bool flushi, bool wbd)
{
        unsigned long line_size, t_start, t_end;

        if (!flushi && !wbd)
                return;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & ~(line_size - 1);
        end = (end + line_size - 1) & ~(line_size - 1);

        if ((end - start) > (8 * PAGE_SIZE)) {
                if (wbd)
                        cpu_dcache_wbinval_all();
                if (flushi)
                        cpu_icache_inval_all();
                return;
        }

        t_start = (start + PAGE_SIZE) & PAGE_MASK;
        t_end = ((end - 1) & PAGE_MASK);

        if ((start & PAGE_MASK) == t_end) {
                if (va_present(vma->vm_mm, start)) {
                        if (wbd)
                                cpu_dcache_wbinval_range(start, end);
                        if (flushi)
                                cpu_icache_inval_range(start, end);
                }
                return;
        }

        if (va_present(vma->vm_mm, start)) {
                if (wbd)
                        cpu_dcache_wbinval_range(start, t_start);
                if (flushi)
                        cpu_icache_inval_range(start, t_start);
        }

        if (va_present(vma->vm_mm, end - 1)) {
                if (wbd)
                        cpu_dcache_wbinval_range(t_end, end);
                if (flushi)
                        cpu_icache_inval_range(t_end, end);
        }

        while (t_start < t_end) {
                if (va_present(vma->vm_mm, t_start)) {
                        if (wbd)
                                cpu_dcache_wbinval_page(t_start);
                        if (flushi)
                                cpu_icache_inval_page(t_start);
                }
                t_start += PAGE_SIZE;
        }
}

#ifdef CONFIG_CACHE_L2
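/*
 * Apply the single-line L2 CCTL command @op to every line of the
 * physical range backing [start, end), then issue an L2 sync.  This
 * is a no-op when no L2 controller (atl2c_base) is present.
 */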
static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op)
{
        if (atl2c_base) {
                unsigned long p_start = __pa(start);
                unsigned long p_end = __pa(end);
                unsigned long cmd;
                unsigned long line_size;
                /* TODO: use PAGE mode to optimize when the range is
                 * larger than PAGE_SIZE */
                line_size = L2_CACHE_LINE_SIZE();
                p_start = p_start & (~(line_size - 1));
                p_end = (p_end + line_size - 1) & (~(line_size - 1));
                cmd = p_start | op | CCTL_SINGLE_CMD;
                do {
                        L2_CMD_RDY();
                        L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                        cmd += line_size;
                        p_start += line_size;
                } while (p_end > p_start);
                cmd = CCTL_CMD_L2_SYNC;
                L2_CMD_RDY();
                L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
                L2_CMD_RDY();
        }
}
#else
#define cpu_l2cache_op(start, end, op) do { } while (0)
#endif
/*
 * DMA
 */
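/*
 * Write back [start, end) through the L1 and L2 caches so that
 * memory is up to date before a device reads it.
 */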
void cpu_dma_wb_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wb_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
        __nds32__msync_all();
        local_irq_restore(flags);
}

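/*
 * Invalidate [start, end) in the L1 and L2 caches before a device
 * writes to it.  Cache lines that the range only partially covers are
 * written back and invalidated first, so that invalidating the rest
 * of the range cannot discard unrelated dirty data sharing those
 * lines.
 */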
void cpu_dma_inval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long old_start = start;
        unsigned long old_end = end;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;
        local_irq_save(flags);
        if (start != old_start) {
                cpu_dcache_wbinval_range(start, start + line_size);
                cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
        }
        if (end != old_end) {
                cpu_dcache_wbinval_range(end - line_size, end);
                cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
        }
        cpu_dcache_inval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
{
        unsigned long line_size;
        unsigned long flags;
        line_size = L1_cache_info[DCACHE].line_size;
        start = start & (~(line_size - 1));
        end = (end + line_size - 1) & (~(line_size - 1));
        if (unlikely(start == end))
                return;

        local_irq_save(flags);
        cpu_dcache_wbinval_range(start, end);
        cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
        __nds32__msync_all();
        local_irq_restore(flags);
}

void cpu_proc_init(void)
{
}

void cpu_proc_fin(void)
{
}

void cpu_do_idle(void)
{
        __nds32__standby_no_wake_grant();
}

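/*
 * Disable and flush both L1 caches, then jump to the @reset address
 * with address translation turned off.
 */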
void cpu_reset(unsigned long reset)
{
        u32 tmp;
        GIE_DISABLE();
        tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
        tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
        __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
        cpu_dcache_wbinval_all();
        cpu_icache_inval_all();

        __asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
}

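/*
 * Switch the MMU context: install @mm's context ID in TLB_MISC and
 * point the hardware page-table base (L1_PPTB) at @mm's page
 * directory.
 */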
void cpu_switch_mm(struct mm_struct *mm)
{
        unsigned long cid;
        cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
        cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
        __nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
        __nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
}
