root/arch/unicore32/kernel/hibernate.c

DEFINITIONS

This source file includes the following definitions.
  1. resume_one_md_table_init
  2. resume_one_page_table_init
  3. resume_physical_mapping_init
  4. resume_init_first_level_page_table
  5. swsusp_arch_resume
  6. pfn_is_nosave
  7. save_processor_state
  8. restore_processor_state

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/unicore32/kernel/hibernate.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 *	Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
 *	Copyright (C) 2001-2010 Guan Xuetao
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/memblock.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/suspend.h>

#include "mach/pm.h"

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

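/* Saved register state of CPU 0 across hibernation */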
struct swsusp_arch_regs swsusp_arch_regs_cpu0;

/*
 * Normally this would create a middle page table on a resume-safe page and
 * put a pointer to it in the given global directory entry.  On UniCore32 the
 * pud and pmd levels are folded into the pgd, so this simply returns the
 * pmd entry corresponding to the given pgd entry.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);

		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps physical memory into the kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET: page frame 'n' ends up mapped at PAGE_OFFSET + (n << PAGE_SHIFT).
 * The page tables themselves are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			pte_t *max_pte;

			if (pfn >= max_low_pfn)
				break;

			/*
			 * Map with normal page tables.
			 * NOTE: We can mark everything as executable here.
			 */
			pte = resume_one_page_table_init(pmd);
			if (!pte)
				return -ENOMEM;

			max_pte = pte + PTRS_PER_PTE;
			for (; pte < max_pte; pte++, pfn++) {
				if (pfn >= max_low_pfn)
					break;

				set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
			}
		}
	}

	return 0;
}
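
/*
 * For reference: with the loops above, each pgd entry covers
 * PTRS_PER_PMD * PTRS_PER_PTE page table entries, i.e. it maps
 * (PTRS_PER_PMD * PTRS_PER_PTE) << PAGE_SHIFT bytes of physical memory.
 */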

static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
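	/*
	 * Nothing to do at this level on UniCore32: the temporary first-level
	 * table is populated directly by resume_physical_mapping_init().
	 */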
}

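/*
 * Build a temporary set of page tables in resume-safe pages and hand control
 * to restore_image(), which copies the saved pages from restore_pblist back
 * into place.  Called by the hibernation core late in the resume sequence.
 */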
int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/* We have enough memory, and from this point on we cannot recover. */
	restore_image(resume_pg_dir, restore_pblist);
	return 0;
}

/*
 * pfn_is_nosave - check if the given pfn is in the 'nosave' section.
 * Pages between __nosave_begin and __nosave_end are not included in the
 * hibernation image.
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= begin_pfn) && (pfn < end_pfn);
}
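
/*
 * Illustration only (not part of the original file): a minimal sketch, under
 * the assumption that the hibernation core skips any page frame for which
 * pfn_is_nosave() returns nonzero.  example_pfn_is_saveable() is a
 * hypothetical helper used purely for this example.
 */
static inline bool example_pfn_is_saveable(unsigned long pfn)
{
	/* A pfn is worth saving only if it is valid RAM outside 'nosave'. */
	return pfn_valid(pfn) && !pfn_is_nosave(pfn);
}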

void save_processor_state(void)
{
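	/*
	 * Nothing to do here; the register state used across hibernation is
	 * kept in swsusp_arch_regs_cpu0 (declared above).
	 */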
}

void restore_processor_state(void)
{
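	/* Mappings changed during resume; drop any stale TLB entries. */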
	local_flush_tlb_all();
}
