#include <linux/cpumask.h>
#include <linux/interrupt.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
#include <asm/ipi.h>

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

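/*
 * Illustrative sketch, not part of this file's build: how a caller might
 * drive the mask-based senders above. CALL_FUNCTION_VECTOR is a real x86
 * vector, but the wrapper itself is hypothetical and only demonstrates
 * the calling convention -- build a cpumask, then hand it to one of the
 * default_send_IPI_mask_* helpers, which unicast an IPI to each CPU in
 * the mask with interrupts disabled for the duration of the walk.
 *
 *	static void example_kick_cpus(const struct cpumask *targets)
 *	{
 *		default_send_IPI_mask_sequence_phys(targets,
 *						    CALL_FUNCTION_VECTOR);
 *	}
 */
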
void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system then we would get an APIC
	 * send error when trying to broadcast, so avoid sending IPIs in this
	 * case.
	 */
	if (!(num_online_cpus() > 1))
		return;

	__default_local_send_IPI_allbutself(vector);
}

void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!cpu_has_apic)
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif
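
/*
 * Illustrative sketch, not part of this file's build: safe_smp_processor_id()
 * is meant for paths where the per-CPU data backing smp_processor_id() may
 * not be trustworthy, e.g. early crash handling. It recovers the CPU number
 * by reading the hardware APIC ID and scanning the apicid-to-cpu map, and
 * falls back to CPU 0 on any failure. The function below is hypothetical
 * and only shows the intended use.
 *
 *	static void example_emergency_report(void)
 *	{
 *		int cpu = safe_smp_processor_id();
 *
 *		pr_emerg("emergency path running on CPU %d\n", cpu);
 *	}
 */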