root/arch/unicore32/mm/pgd.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. get_pgd_slow
  2. free_pgd_slow

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * linux/arch/unicore32/mm/pgd.c
   4  *
   5  * Code specific to PKUnity SoC and UniCore ISA
   6  *
   7  * Copyright (C) 2001-2010 GUAN Xue-tao
   8  */
   9 #include <linux/mm.h>
  10 #include <linux/gfp.h>
  11 #include <linux/highmem.h>
  12 
  13 #include <asm/pgalloc.h>
  14 #include <asm/page.h>
  15 #include <asm/tlbflush.h>
  16 
  17 #include "mm.h"
  18 
  19 #define FIRST_KERNEL_PGD_NR     (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
  20 
  21 /*
  22  * need to get a 4k page for level 1
  23  */
  24 pgd_t *get_pgd_slow(struct mm_struct *mm)
  25 {
  26         pgd_t *new_pgd, *init_pgd;
  27         pmd_t *new_pmd, *init_pmd;
  28         pte_t *new_pte, *init_pte;
  29 
  30         new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 0);
  31         if (!new_pgd)
  32                 goto no_pgd;
  33 
  34         memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
  35 
  36         /*
  37          * Copy over the kernel and IO PGD entries
  38          */
  39         init_pgd = pgd_offset_k(0);
  40         memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
  41                        (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
  42 
  43         clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
  44 
  45         if (!vectors_high()) {
  46                 /*
  47                  * On UniCore, first page must always be allocated since it
  48                  * contains the machine vectors.
  49                  */
  50                 new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0);
  51                 if (!new_pmd)
  52                         goto no_pmd;
  53 
  54                 new_pte = pte_alloc_map(mm, new_pmd, 0);
  55                 if (!new_pte)
  56                         goto no_pte;
  57 
  58                 init_pmd = pmd_offset((pud_t *)init_pgd, 0);
  59                 init_pte = pte_offset_map(init_pmd, 0);
  60                 set_pte(new_pte, *init_pte);
  61                 pte_unmap(init_pte);
  62                 pte_unmap(new_pte);
  63         }
  64 
  65         return new_pgd;
  66 
  67 no_pte:
  68         pmd_free(mm, new_pmd);
  69         mm_dec_nr_pmds(mm);
  70 no_pmd:
  71         free_pages((unsigned long)new_pgd, 0);
  72 no_pgd:
  73         return NULL;
  74 }
  75 
  76 void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
  77 {
  78         pmd_t *pmd;
  79         pgtable_t pte;
  80 
  81         if (!pgd)
  82                 return;
  83 
  84         /* pgd is always present and good */
  85         pmd = pmd_off(pgd, 0);
  86         if (pmd_none(*pmd))
  87                 goto free;
  88         if (pmd_bad(*pmd)) {
  89                 pmd_ERROR(*pmd);
  90                 pmd_clear(pmd);
  91                 goto free;
  92         }
  93 
  94         pte = pmd_pgtable(*pmd);
  95         pmd_clear(pmd);
  96         pte_free(mm, pte);
  97         mm_dec_nr_ptes(mm);
  98         pmd_free(mm, pmd);
  99         mm_dec_nr_pmds(mm);
 100 free:
 101         free_pages((unsigned long) pgd, 0);
 102 }

/* [<][>][^][v][top][bottom][index][help] */