#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/percpu.h>

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifdef CONFIG_X86_LOCAL_APIC
# include <asm/mpspec.h>
# include <asm/apic.h>
# ifdef CONFIG_X86_IO_APIC
#  include <asm/io_apic.h>
# endif
#endif
#include <asm/thread_info.h>
#include <asm/cpumask.h>
#include <asm/cpufeature.h>

extern int smp_num_siblings;
extern unsigned int num_processors;

static inline bool cpu_has_ht_siblings(void)
{
	bool has_siblings = false;
#ifdef CONFIG_SMP
	has_siblings = cpu_has_ht && smp_num_siblings > 1;
#endif
	return has_siblings;
}

DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}

DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
#endif

/* Static state in head.S used to set up a CPU */
extern unsigned long stack_start; /* Initial stack pointer address */

struct task_struct;

struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	void (*stop_other_cpus)(int wait);
	void (*smp_send_reschedule)(int cpu);

	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};

/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
#ifndef CONFIG_PARAVIRT
#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
#endif
extern struct smp_ops smp_ops;

static inline void smp_send_stop(void)
{
	smp_ops.stop_other_cpus(0);
}

static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);
}

static inline void smp_prepare_boot_cpu(void)
{
	smp_ops.smp_prepare_boot_cpu();
}

static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	return smp_ops.cpu_up(cpu, tidle);
}

static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	smp_ops.cpu_die(cpu);
}

static inline void play_dead(void)
{
	smp_ops.play_dead();
}

static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}
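
/*
 * The two hooks below are the arch entry points for the generic
 * cross-CPU function-call machinery in kernel/smp.c.  A simplified,
 * illustrative sketch of the call flow (not verbatim kernel code):
 *
 *	smp_call_function_single(cpu, func, info, wait)
 *	  -> generic code queues the call_single_data, then
 *	  -> arch_send_call_function_single_ipi(cpu)
 *	  -> smp_ops.send_call_func_single_ipi(cpu)
 *	  -> native_send_call_func_single_ipi() or a paravirt override
 *
 * Routing the IPI through smp_ops lets paravirt guests substitute
 * their own delivery mechanism for the native APIC one (see the
 * "Globals due to paravirt" note above).
 */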
static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(mask);
}

void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_cpu_disable(void);
int common_cpu_die(unsigned int cpu);
void native_cpu_die(unsigned int cpu);
void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);

void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);

void smp_store_boot_cpu_info(void);
void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)

#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu)	wbinvd()
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}
#endif /* CONFIG_SMP */

extern unsigned disabled_cpus;

#ifdef CONFIG_X86_32_SMP
/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
#define raw_smp_processor_id() (this_cpu_read(cpu_number))
extern int safe_smp_processor_id(void);

#elif defined(CONFIG_X86_64_SMP)
#define raw_smp_processor_id() (this_cpu_read(cpu_number))

#define stack_smp_processor_id()					\
({								\
	struct thread_info *ti;						\
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
	ti->cpu;							\
})
#define safe_smp_processor_id()		smp_processor_id()

#endif

#ifdef CONFIG_X86_LOCAL_APIC

#ifndef CONFIG_X86_64
static inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
}

#endif

extern int hard_smp_processor_id(void);

#else /* CONFIG_X86_LOCAL_APIC */

# ifndef CONFIG_SMP
#  define hard_smp_processor_id()	0
# endif

#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_DEBUG_NMI_SELFTEST
extern void nmi_selftest(void);
#else
#define nmi_selftest() do { } while (0)
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */
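
/*
 * Usage sketch (illustrative only, not part of this header): callers
 * elsewhere in the kernel consume the topology masks declared above
 * roughly like this:
 *
 *	int cpu, sibling;
 *
 *	for_each_online_cpu(cpu)
 *		for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
 *			pr_info("CPU%d shares its LLC with CPU%d\n",
 *				cpu, sibling);
 *
 * for_each_online_cpu() and for_each_cpu() come from <linux/cpumask.h>,
 * which this header already includes; pr_info() is from <linux/printk.h>.
 */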