root/arch/ia64/include/asm/acpi.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. acpi_has_cpu_in_madt
  2. disable_acpi
  3. arch_has_acpi_pdc
  4. arch_acpi_set_pdc_bits
  5. per_cpu_scan_finalize

   1 /* SPDX-License-Identifier: GPL-2.0-or-later */
   2 /*
   3  *  Copyright (C) 1999 VA Linux Systems
   4  *  Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
   5  *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
   6  *  Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
   7  */
   8 
   9 #ifndef _ASM_ACPI_H
  10 #define _ASM_ACPI_H
  11 
  12 #ifdef __KERNEL__
  13 
  14 #include <acpi/pdc_intel.h>
  15 
  16 #include <linux/init.h>
  17 #include <linux/numa.h>
  18 #include <asm/numa.h>
  19 
  20 
  21 extern int acpi_lapic;
  22 #define acpi_disabled 0 /* ACPI always enabled on IA64 */
  23 #define acpi_noirq 0    /* ACPI always enabled on IA64 */
  24 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
  25 #define acpi_strict 1   /* no ACPI spec workarounds on IA64 */
  26 
  27 static inline bool acpi_has_cpu_in_madt(void)
  28 {
  29         return !!acpi_lapic;
  30 }
  31 
  32 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
  33 static inline void disable_acpi(void) { }
  34 
  35 int acpi_request_vector (u32 int_type);
  36 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
  37 
  38 /* Low-level suspend routine. */
  39 extern int acpi_suspend_lowlevel(void);
  40 
  41 extern unsigned long acpi_wakeup_address;
  42 
  43 /*
  44  * Record the cpei override flag and current logical cpu. This is
  45  * useful for CPU removal.
  46  */
  47 extern unsigned int can_cpei_retarget(void);
  48 extern unsigned int is_cpu_cpei_target(unsigned int cpu);
  49 extern void set_cpei_target_cpu(unsigned int cpu);
  50 extern unsigned int get_cpei_target_cpu(void);
  51 extern void prefill_possible_map(void);
  52 #ifdef CONFIG_ACPI_HOTPLUG_CPU
  53 extern int additional_cpus;
  54 #else
  55 #define additional_cpus 0
  56 #endif
  57 
  58 #ifdef CONFIG_ACPI_NUMA
  59 #if MAX_NUMNODES > 256
  60 #define MAX_PXM_DOMAINS MAX_NUMNODES
  61 #else
  62 #define MAX_PXM_DOMAINS (256)
  63 #endif
  64 extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
  65 extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
  66 #endif
  67 
  68 static inline bool arch_has_acpi_pdc(void) { return true; }
  69 static inline void arch_acpi_set_pdc_bits(u32 *buf)
  70 {
  71         buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
  72 }
  73 
  74 #define acpi_unlazy_tlb(x)
  75 
  76 #ifdef CONFIG_ACPI_NUMA
  77 extern cpumask_t early_cpu_possible_map;
  78 #define for_each_possible_early_cpu(cpu)  \
  79         for_each_cpu((cpu), &early_cpu_possible_map)
  80 
  81 static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
  82 {
  83         int low_cpu, high_cpu;
  84         int cpu;
  85         int next_nid = 0;
  86 
  87         low_cpu = cpumask_weight(&early_cpu_possible_map);
  88 
  89         high_cpu = max(low_cpu, min_cpus);
  90         high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
  91 
  92         for (cpu = low_cpu; cpu < high_cpu; cpu++) {
  93                 cpumask_set_cpu(cpu, &early_cpu_possible_map);
  94                 if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
  95                         node_cpuid[cpu].nid = next_nid;
  96                         next_nid++;
  97                         if (next_nid >= num_online_nodes())
  98                                 next_nid = 0;
  99                 }
 100         }
 101 }
 102 
 103 extern void acpi_numa_fixup(void);
 104 
 105 #endif /* CONFIG_ACPI_NUMA */
 106 
 107 #endif /*__KERNEL__*/
 108 
 109 #endif /*_ASM_ACPI_H*/

/* [<][>][^][v][top][bottom][index][help] */