cpuid 110 arch/alpha/include/asm/err_common.h u64 cpuid; cpuid 169 arch/alpha/include/asm/hwrpb.h unsigned long cpuid; cpuid 93 arch/alpha/include/asm/mmu_context.h #define cpu_last_asn(cpuid) (cpu_data[cpuid].last_asn) cpuid 96 arch/alpha/include/asm/mmu_context.h #define cpu_last_asn(cpuid) last_asn cpuid 859 arch/alpha/kernel/core_marvel.c int cpuid; cpuid 862 arch/alpha/kernel/core_marvel.c cpuid = (~(pa >> 35) & 0xff); cpuid 864 arch/alpha/kernel/core_marvel.c cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2)); cpuid 866 arch/alpha/kernel/core_marvel.c return marvel_cpuid_to_nid(cpuid); cpuid 870 arch/alpha/kernel/core_marvel.c marvel_cpuid_to_nid(int cpuid) cpuid 872 arch/alpha/kernel/core_marvel.c return cpuid; cpuid 452 arch/alpha/kernel/core_wildfire.c int wildfire_cpuid_to_nid(int cpuid) cpuid 455 arch/alpha/kernel/core_wildfire.c return cpuid >> 2; cpuid 552 arch/alpha/kernel/err_titan.c (int)header->by_type.regatta_frame.cpuid); cpuid 81 arch/alpha/kernel/process.c int cpuid = smp_processor_id(); cpuid 88 arch/alpha/kernel/process.c + hwrpb->processor_size * cpuid); cpuid 97 arch/alpha/kernel/process.c if (cpuid != boot_cpuid) { cpuid 100 arch/alpha/kernel/process.c set_cpu_present(cpuid, false); cpuid 101 arch/alpha/kernel/process.c set_cpu_possible(cpuid, false); cpuid 82 arch/alpha/kernel/smp.c smp_store_cpu_info(int cpuid) cpuid 84 arch/alpha/kernel/smp.c cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy; cpuid 85 arch/alpha/kernel/smp.c cpu_data[cpuid].last_asn = ASN_FIRST_VERSION; cpuid 86 arch/alpha/kernel/smp.c cpu_data[cpuid].need_new_asn = 0; cpuid 87 arch/alpha/kernel/smp.c cpu_data[cpuid].asn_lock = 0; cpuid 94 arch/alpha/kernel/smp.c smp_setup_percpu_timer(int cpuid) cpuid 96 arch/alpha/kernel/smp.c cpu_data[cpuid].prof_counter = 1; cpuid 97 arch/alpha/kernel/smp.c cpu_data[cpuid].prof_multiplier = 1; cpuid 101 arch/alpha/kernel/smp.c wait_boot_cpu_to_stop(int cpuid) cpuid 111 arch/alpha/kernel/smp.c printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid); cpuid 122 arch/alpha/kernel/smp.c int cpuid = hard_smp_processor_id(); cpuid 124 arch/alpha/kernel/smp.c if (cpu_online(cpuid)) { cpuid 125 arch/alpha/kernel/smp.c printk("??, cpu 0x%x already present??\n", cpuid); cpuid 128 arch/alpha/kernel/smp.c set_cpu_online(cpuid, true); cpuid 140 arch/alpha/kernel/smp.c smp_setup_percpu_timer(cpuid); cpuid 152 arch/alpha/kernel/smp.c notify_cpu_starting(cpuid); cpuid 159 arch/alpha/kernel/smp.c wait_boot_cpu_to_stop(cpuid); cpuid 163 arch/alpha/kernel/smp.c smp_store_cpu_info(cpuid); cpuid 169 arch/alpha/kernel/smp.c cpuid, current, current->active_mm)); cpuid 200 arch/alpha/kernel/smp.c send_secondary_console_msg(char *str, int cpuid) cpuid 210 arch/alpha/kernel/smp.c + cpuid * hwrpb->processor_size); cpuid 212 arch/alpha/kernel/smp.c cpumask = (1UL << cpuid); cpuid 224 arch/alpha/kernel/smp.c set_bit(cpuid, &hwrpb->rxrdy); cpuid 231 arch/alpha/kernel/smp.c printk("Processor %x not ready\n", cpuid); cpuid 292 arch/alpha/kernel/smp.c secondary_cpu_start(int cpuid, struct task_struct *idle) cpuid 301 arch/alpha/kernel/smp.c + cpuid * hwrpb->processor_size); cpuid 323 arch/alpha/kernel/smp.c cpuid, idle->state, ipcb->flags)); cpuid 341 arch/alpha/kernel/smp.c send_secondary_console_msg("START\r\n", cpuid); cpuid 351 arch/alpha/kernel/smp.c printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid); cpuid 355 arch/alpha/kernel/smp.c DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid)); cpuid 363 arch/alpha/kernel/smp.c 
smp_boot_one_cpu(int cpuid, struct task_struct *idle) cpuid 371 arch/alpha/kernel/smp.c if (secondary_cpu_start(cpuid, idle)) cpuid 390 arch/alpha/kernel/smp.c printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid); cpuid 406 arch/alpha/kernel/sys_marvel.c int cpuid = hard_smp_processor_id(); cpuid 407 arch/alpha/kernel/sys_marvel.c struct io7 *io7 = marvel_find_io7(cpuid); cpuid 416 arch/alpha/kernel/sys_marvel.c printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid); cpuid 419 arch/alpha/kernel/sys_marvel.c io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid); cpuid 420 arch/alpha/kernel/sys_marvel.c io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid); cpuid 421 arch/alpha/kernel/sys_marvel.c io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid); cpuid 422 arch/alpha/kernel/sys_marvel.c io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid); cpuid 423 arch/alpha/kernel/sys_marvel.c io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid); cpuid 427 arch/alpha/kernel/sys_marvel.c io7_redirect_one_lsi(io7, i, cpuid); cpuid 429 arch/alpha/kernel/sys_marvel.c io7_redirect_one_lsi(io7, 0x74, cpuid); cpuid 430 arch/alpha/kernel/sys_marvel.c io7_redirect_one_lsi(io7, 0x75, cpuid); cpuid 434 arch/alpha/kernel/sys_marvel.c io7_redirect_one_msi(io7, i, cpuid); cpuid 481 arch/arm/include/asm/cacheflush.h void check_cpu_icache_size(int cpuid); cpuid 483 arch/arm/include/asm/cacheflush.h static inline void check_cpu_icache_size(int cpuid) { } cpuid 15 arch/arm/include/asm/cpu.h u32 cpuid; cpuid 40 arch/arm/include/asm/smp_plat.h return is_smp() ? cpu_info->cpuid & ARM_CPU_PART_MASK : cpuid 22 arch/arm/include/asm/topology.h static inline void store_cpu_topology(unsigned int cpuid) { } cpuid 1242 arch/arm/kernel/setup.c u32 cpuid; cpuid 1251 arch/arm/kernel/setup.c cpuid = is_smp() ? 
per_cpu(cpu_data, i).cpuid : read_cpuid_id(); cpuid 1253 arch/arm/kernel/setup.c cpu_name, cpuid & 15, elf_platform); cpuid 1275 arch/arm/kernel/setup.c seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24); cpuid 1279 arch/arm/kernel/setup.c if ((cpuid & 0x0008f000) == 0x00000000) { cpuid 1281 arch/arm/kernel/setup.c seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4); cpuid 1283 arch/arm/kernel/setup.c if ((cpuid & 0x0008f000) == 0x00007000) { cpuid 1286 arch/arm/kernel/setup.c (cpuid >> 16) & 127); cpuid 1290 arch/arm/kernel/setup.c (cpuid >> 20) & 15); cpuid 1293 arch/arm/kernel/setup.c (cpuid >> 4) & 0xfff); cpuid 1295 arch/arm/kernel/setup.c seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15); cpuid 369 arch/arm/kernel/smp.c static void smp_store_cpu_info(unsigned int cpuid) cpuid 371 arch/arm/kernel/smp.c struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); cpuid 374 arch/arm/kernel/smp.c cpu_info->cpuid = read_cpuid_id(); cpuid 376 arch/arm/kernel/smp.c store_cpu_topology(cpuid); cpuid 377 arch/arm/kernel/smp.c check_cpu_icache_size(cpuid); cpuid 16 arch/arm/kernel/suspend.c extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid); cpuid 177 arch/arm/kernel/topology.c static inline void update_cpu_capacity(unsigned int cpuid) {} cpuid 194 arch/arm/kernel/topology.c void store_cpu_topology(unsigned int cpuid) cpuid 196 arch/arm/kernel/topology.c struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; cpuid 233 arch/arm/kernel/topology.c update_cpu_capacity(cpuid); cpuid 236 arch/arm/kernel/topology.c cpuid, cpu_topology[cpuid].thread_id, cpuid 237 arch/arm/kernel/topology.c cpu_topology[cpuid].core_id, cpuid 238 arch/arm/kernel/topology.c cpu_topology[cpuid].package_id, mpidr); cpuid 241 arch/arm/kernel/topology.c update_siblings_masks(cpuid); cpuid 155 arch/arm/mach-exynos/pm.c unsigned int cpuid = smp_processor_id(); cpuid 160 arch/arm/mach-exynos/pm.c exynos_set_boot_flag(cpuid, C2_STATE); cpuid 181 arch/arm/mach-exynos/pm.c exynos_clear_boot_flag(cpuid, C2_STATE); cpuid 413 arch/arm/mach-exynos/suspend.c u32 cpuid = read_cpuid_part(); cpuid 418 arch/arm/mach-exynos/suspend.c if (cpuid == ARM_CPU_PART_CORTEX_A9) cpuid 422 arch/arm/mach-exynos/suspend.c && cpuid == ARM_CPU_PART_CORTEX_A9) cpuid 434 arch/arm/mach-exynos/suspend.c u32 cpuid = read_cpuid_part(); cpuid 442 arch/arm/mach-exynos/suspend.c && cpuid == ARM_CPU_PART_CORTEX_A9) cpuid 316 arch/arm/mach-ixp4xx/common-pci.c unsigned long cpuid = read_cpuid_id(); cpuid 327 arch/arm/mach-ixp4xx/common-pci.c if (!(cpuid & 0xf) && cpu_is_ixp42x()) { cpuid 351 arch/arm/mach-omap2/id.c u32 cpuid, idcode; cpuid 360 arch/arm/mach-omap2/id.c cpuid = read_cpuid_id(); cpuid 361 arch/arm/mach-omap2/id.c if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) { cpuid 249 arch/arm/mm/init.c void check_cpu_icache_size(int cpuid) cpuid 256 arch/arm/mm/init.c if (cpuid != 0 && icache_size != size) cpuid 258 arch/arm/mm/init.c cpuid); cpuid 24 arch/arm64/kernel/topology.c void store_cpu_topology(unsigned int cpuid) cpuid 26 arch/arm64/kernel/topology.c struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; cpuid 55 arch/arm64/kernel/topology.c cpuid, cpuid_topo->package_id, cpuid_topo->core_id, cpuid 59 arch/arm64/kernel/topology.c update_siblings_masks(cpuid); cpuid 77 arch/ia64/include/asm/smp.h cpu_logical_id (int cpuid) cpuid 82 arch/ia64/include/asm/smp.h if (cpu_physical_id(i) == cpuid) cpuid 139 arch/ia64/kernel/crash.c int cpuid; cpuid 142 arch/ia64/kernel/crash.c cpuid = smp_processor_id(); cpuid 149 
arch/ia64/kernel/crash.c kdump_status[cpuid] = 1; cpuid 1471 arch/ia64/kernel/mca.c unsigned int cpuid; cpuid 1473 arch/ia64/kernel/mca.c cpuid = smp_processor_id(); cpuid 1481 arch/ia64/kernel/mca.c cpuid = cpumask_next(cpuid+1, cpu_online_mask); cpuid 1483 arch/ia64/kernel/mca.c if (cpuid < nr_cpu_ids) { cpuid 1484 arch/ia64/kernel/mca.c ia64_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); cpuid 1539 arch/ia64/kernel/mca.c unsigned int cpuid; cpuid 1541 arch/ia64/kernel/mca.c cpuid = smp_processor_id(); cpuid 1549 arch/ia64/kernel/mca.c cpuid = cpumask_next(cpuid+1, cpu_online_mask); cpuid 1551 arch/ia64/kernel/mca.c if (cpuid < NR_CPUS) { cpuid 1552 arch/ia64/kernel/mca.c ia64_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); cpuid 825 arch/ia64/kernel/setup.c } cpuid; cpuid 832 arch/ia64/kernel/setup.c cpuid.bits[i] = ia64_get_cpuid(i); cpuid 834 arch/ia64/kernel/setup.c memcpy(c->vendor, cpuid.field.vendor, 16); cpuid 849 arch/ia64/kernel/setup.c c->ppn = cpuid.field.ppn; cpuid 850 arch/ia64/kernel/setup.c c->number = cpuid.field.number; cpuid 851 arch/ia64/kernel/setup.c c->revision = cpuid.field.revision; cpuid 852 arch/ia64/kernel/setup.c c->model = cpuid.field.model; cpuid 853 arch/ia64/kernel/setup.c c->family = cpuid.field.family; cpuid 854 arch/ia64/kernel/setup.c c->archrev = cpuid.field.archrev; cpuid 855 arch/ia64/kernel/setup.c c->features = cpuid.field.features; cpuid 172 arch/ia64/kernel/smpboot.c int cpuid; cpuid 175 arch/ia64/kernel/smpboot.c cpuid = smp_processor_id(); cpuid 180 arch/ia64/kernel/smpboot.c if (!(fix_bsp_b0 && cpuid)) cpuid 183 arch/ia64/kernel/smpboot.c sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0]; cpuid 184 arch/ia64/kernel/smpboot.c printk ("Fixed BSP b0 value from CPU %d\n", cpuid); cpuid 355 arch/ia64/kernel/smpboot.c int cpuid, phys_id, itc_master; cpuid 364 arch/ia64/kernel/smpboot.c cpuid = smp_processor_id(); cpuid 368 arch/ia64/kernel/smpboot.c if (cpu_online(cpuid)) { cpuid 370 arch/ia64/kernel/smpboot.c phys_id, cpuid); cpuid 379 arch/ia64/kernel/smpboot.c set_numa_node(cpu_to_node_map[cpuid]); cpuid 380 arch/ia64/kernel/smpboot.c set_numa_mem(local_memory_node(cpu_to_node_map[cpuid])); cpuid 384 arch/ia64/kernel/smpboot.c __setup_vector_irq(cpuid); cpuid 385 arch/ia64/kernel/smpboot.c notify_cpu_starting(cpuid); cpuid 386 arch/ia64/kernel/smpboot.c set_cpu_online(cpuid, true); cpuid 387 arch/ia64/kernel/smpboot.c per_cpu(cpu_state, cpuid) = CPU_ONLINE; cpuid 420 arch/ia64/kernel/smpboot.c last_cpuinfo = cpu_data(cpuid - 1); cpuid 435 arch/ia64/kernel/smpboot.c cpumask_set_cpu(cpuid, &cpu_callin_map); cpuid 436 arch/ia64/kernel/smpboot.c Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid); cpuid 111 arch/m68k/include/asm/macintosh.h unsigned long cpuid; cpuid 121 arch/m68k/mac/config.c mac_bi_data.cpuid = be32_to_cpup(data); cpuid 819 arch/m68k/mac/config.c model = (mac_bi_data.cpuid >> 2) & 63; cpuid 878 arch/m68k/mac/config.c mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize); cpuid 366 arch/mips/include/asm/octeon/octeon-model.h uint32_t cpuid = cvmx_get_proc_id(); cpuid 368 arch/mips/include/asm/octeon/octeon-model.h return __OCTEON_IS_MODEL_COMPILE__(model, cpuid); cpuid 37 arch/mips/include/asm/sn/agent.h SET_HUB_NIC(cpuid(), (_v)) cpuid 43 arch/mips/include/asm/sn/agent.h GET_HUB_NIC(cpuid()) cpuid 456 arch/mips/loongson64/loongson-3/smp.c register long cpuid, core, node, count; cpuid 509 arch/mips/loongson64/loongson-3/smp.c [base] "=&r" (base), [cpuid] "=&r" (cpuid), cpuid 518 
arch/mips/loongson64/loongson-3/smp.c register long cpuid, core, node, count; cpuid 592 arch/mips/loongson64/loongson-3/smp.c [base] "=&r" (base), [cpuid] "=&r" (cpuid), cpuid 601 arch/mips/loongson64/loongson-3/smp.c register long cpuid, core, node, count; cpuid 656 arch/mips/loongson64/loongson-3/smp.c [base] "=&r" (base), [cpuid] "=&r" (cpuid), cpuid 223 arch/mips/paravirt/paravirt-irq.c unsigned int cpuid = cpunum_for_cpu(cpu); cpuid 224 arch/mips/paravirt/paravirt-irq.c mask = 1 << (cpuid * MBOX_BITS_PER_CPU + mbox); cpuid 225 arch/mips/paravirt/paravirt-irq.c __raw_writel(mask, base + (cpuid * mips_irq_cpu_stride)); cpuid 252 arch/mips/paravirt/paravirt-irq.c unsigned int cpuid = cpunum_for_cpu(cpu); cpuid 257 arch/mips/paravirt/paravirt-irq.c mask = actions << (cpuid * MBOX_BITS_PER_CPU); cpuid 264 arch/mips/paravirt/paravirt-irq.c unsigned int cpuid = get_ebase_cpunum(); cpuid 269 arch/mips/paravirt/paravirt-irq.c mask = 1 << (cpuid * MBOX_BITS_PER_CPU + mbox); cpuid 270 arch/mips/paravirt/paravirt-irq.c __raw_writel(mask, base + (cpuid * mips_irq_cpu_stride)); cpuid 327 arch/mips/paravirt/paravirt-irq.c unsigned int cpuid = get_ebase_cpunum(); cpuid 331 arch/mips/paravirt/paravirt-irq.c (cpuid * mips_irq_cpu_stride)); cpuid 334 arch/mips/paravirt/paravirt-irq.c en = __raw_readl(mips_irq_chip + mips_irq_chip_reg_src + (cpuid * mips_irq_cpu_stride) + sizeof(u32)); cpuid 335 arch/mips/paravirt/paravirt-irq.c en = (en >> (2 * cpuid)) & 3; cpuid 67 arch/mips/sgi-ip27/ip27-smp.c cpuid_t cpuid; cpuid 74 arch/mips/sgi-ip27/ip27-smp.c cpuid = acpu->cpu_info.virtid; cpuid 77 arch/mips/sgi-ip27/ip27-smp.c cpuid_to_compact_node[cpuid] = cnode; cpuid 78 arch/mips/sgi-ip27/ip27-smp.c if (cpuid > highest) cpuid 79 arch/mips/sgi-ip27/ip27-smp.c highest = cpuid; cpuid 83 arch/mips/sgi-ip27/ip27-smp.c set_cpu_possible(cpuid, true); cpuid 84 arch/mips/sgi-ip27/ip27-smp.c alloc_cpupda(cpuid, tot_cpus_found); cpuid 158 arch/mips/sgi-ip27/ip27-timer.c int cpuid; cpuid 165 arch/mips/sgi-ip27/ip27-timer.c cpuid = LOCAL_HUB_L(PI_CPU_NUM) ? 
IP27_CPU0_INDEX : IP27_CPU1_INDEX; cpuid 166 arch/mips/sgi-ip27/ip27-timer.c cpu = (klcpu_t *) KLCF_COMP(board, cpuid); cpuid 73 arch/parisc/include/asm/processor.h unsigned long cpuid; cpuid 87 arch/parisc/include/asm/processor.h unsigned long cpuid; /* aka slot_number or set to NO_PROC_ID */ cpuid 5 arch/parisc/include/asm/smp.h extern int init_per_cpu(int cpuid); cpuid 24 arch/parisc/include/asm/topology.h void store_cpu_topology(unsigned int cpuid); cpuid 30 arch/parisc/include/asm/topology.h static inline void store_cpu_topology(unsigned int cpuid) { } cpuid 940 arch/parisc/kernel/drivers.c boot_cpu_data.pdc.cpuid); cpuid 82 arch/parisc/kernel/processor.c unsigned long cpuid; cpuid 101 arch/parisc/kernel/processor.c cpuid = boot_cpu_data.cpu_count; cpuid 103 arch/parisc/kernel/processor.c cpu_info.cpu_num = cpu_info.cpu_loc = cpuid; cpuid 134 arch/parisc/kernel/processor.c cpuid, cpu_info.cpu_num, cpu_info.cpu_loc, cpuid 153 arch/parisc/kernel/processor.c cpuid = cpu_info.cpu_num; cpuid 159 arch/parisc/kernel/processor.c p = &per_cpu(cpu_data, cpuid); cpuid 163 arch/parisc/kernel/processor.c if (cpuid) cpuid 169 arch/parisc/kernel/processor.c p->cpuid = cpuid; /* save CPU id */ cpuid 174 arch/parisc/kernel/processor.c store_cpu_topology(cpuid); cpuid 181 arch/parisc/kernel/processor.c init_percpu_prof(cpuid); cpuid 192 arch/parisc/kernel/processor.c if (cpuid) { cpuid 205 arch/parisc/kernel/processor.c cpu_irq_actions[cpuid] = actions; cpuid 213 arch/parisc/kernel/processor.c if (cpuid) { cpuid 214 arch/parisc/kernel/processor.c set_cpu_present(cpuid, true); cpuid 215 arch/parisc/kernel/processor.c cpu_up(cpuid); cpuid 260 arch/parisc/kernel/processor.c if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK) { cpuid 262 arch/parisc/kernel/processor.c (boot_cpu_data.pdc.cpuid >> 5) & 127, cpuid 263 arch/parisc/kernel/processor.c boot_cpu_data.pdc.cpuid & 31, cpuid 264 arch/parisc/kernel/processor.c boot_cpu_data.pdc.cpuid); cpuid 266 arch/parisc/kernel/processor.c add_device_randomness(&boot_cpu_data.pdc.cpuid, cpuid 267 arch/parisc/kernel/processor.c sizeof(boot_cpu_data.pdc.cpuid)); cpuid 87 arch/parisc/kernel/smp.c ipi_init(int cpuid) cpuid 91 arch/parisc/kernel/smp.c if(cpu_online(cpuid) ) cpuid 320 arch/parisc/kernel/smp.c int smp_boot_one_cpu(int cpuid, struct task_struct *idle) cpuid 322 arch/parisc/kernel/smp.c const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); cpuid 325 arch/parisc/kernel/smp.c task_thread_info(idle)->cpu = cpuid; cpuid 330 arch/parisc/kernel/smp.c cpu_now_booting = cpuid; cpuid 339 arch/parisc/kernel/smp.c printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa); cpuid 359 arch/parisc/kernel/smp.c if(cpu_online(cpuid)) { cpuid 368 arch/parisc/kernel/smp.c printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid); cpuid 374 arch/parisc/kernel/smp.c cpuid, timeout * 100); cpuid 380 arch/parisc/kernel/smp.c int bootstrap_processor = per_cpu(cpu_data, 0).cpuid; cpuid 30 arch/parisc/kernel/topology.c static void update_siblings_masks(unsigned int cpuid) cpuid 32 arch/parisc/kernel/topology.c struct cputopo_parisc *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; cpuid 42 arch/parisc/kernel/topology.c cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); cpuid 43 arch/parisc/kernel/topology.c if (cpu != cpuid) cpuid 49 arch/parisc/kernel/topology.c cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); cpuid 50 arch/parisc/kernel/topology.c if (cpu != cpuid) cpuid 63 arch/parisc/kernel/topology.c void __init store_cpu_topology(unsigned int cpuid) cpuid 65 
arch/parisc/kernel/topology.c struct cputopo_parisc *cpuid_topo = &cpu_topology[cpuid]; cpuid 78 arch/parisc/kernel/topology.c p = &per_cpu(cpu_data, cpuid); cpuid 82 arch/parisc/kernel/topology.c if (cpu == cpuid) /* ignore current cpu */ cpuid 102 arch/parisc/kernel/topology.c update_siblings_masks(cpuid); cpuid 105 arch/parisc/kernel/topology.c cpuid, cpu_topology[cpuid].thread_id, cpuid 106 arch/parisc/kernel/topology.c cpu_topology[cpuid].core_id, cpuid 107 arch/parisc/kernel/topology.c cpu_topology[cpuid].socket_id); cpuid 697 arch/powerpc/kernel/irq.c int cpuid; cpuid 712 arch/powerpc/kernel/irq.c cpuid = irq_rover; cpuid 716 arch/powerpc/kernel/irq.c cpuid = cpumask_first_and(mask, cpu_online_mask); cpuid 717 arch/powerpc/kernel/irq.c if (cpuid >= nr_cpu_ids) cpuid 721 arch/powerpc/kernel/irq.c return get_hard_smp_processor_id(cpuid); cpuid 75 arch/powerpc/sysdev/ehv_pic.c int cpuid = irq_choose_cpu(dest); cpuid 80 arch/powerpc/sysdev/ehv_pic.c ev_int_set_config(src, config, prio, cpuid); cpuid 831 arch/powerpc/sysdev/mpic.c int cpuid = irq_choose_cpu(cpumask); cpuid 833 arch/powerpc/sysdev/mpic.c mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); cpuid 945 arch/powerpc/sysdev/mpic.c static void mpic_set_destination(unsigned int virq, unsigned int cpuid) cpuid 951 arch/powerpc/sysdev/mpic.c mpic, virq, src, cpuid); cpuid 956 arch/powerpc/sysdev/mpic.c mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); cpuid 1909 arch/powerpc/sysdev/mpic.c int cpuid = get_hard_smp_processor_id(cpu); cpuid 1914 arch/powerpc/sysdev/mpic.c pir |= (1 << cpuid); cpuid 1919 arch/powerpc/sysdev/mpic.c pir &= ~(1 << cpuid); cpuid 1927 arch/powerpc/sysdev/mpic.c _mpic_write(mpic->reg_type, &mpic->cpuregs[cpuid], cpuid 47 arch/riscv/kernel/smpboot.c int cpuid; cpuid 53 arch/riscv/kernel/smpboot.c for_each_possible_cpu(cpuid) { cpuid 54 arch/riscv/kernel/smpboot.c if (cpuid == smp_processor_id()) cpuid 56 arch/riscv/kernel/smpboot.c set_cpu_present(cpuid, true); cpuid 65 arch/riscv/kernel/smpboot.c int cpuid = 1; cpuid 77 arch/riscv/kernel/smpboot.c if (cpuid >= NR_CPUS) { cpuid 79 arch/riscv/kernel/smpboot.c cpuid, hart); cpuid 83 arch/riscv/kernel/smpboot.c cpuid_to_hartid_map(cpuid) = hart; cpuid 84 arch/riscv/kernel/smpboot.c cpuid++; cpuid 89 arch/riscv/kernel/smpboot.c if (cpuid > nr_cpu_ids) cpuid 91 arch/riscv/kernel/smpboot.c cpuid, nr_cpu_ids); cpuid 93 arch/riscv/kernel/smpboot.c for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) { cpuid 94 arch/riscv/kernel/smpboot.c if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) cpuid 95 arch/riscv/kernel/smpboot.c set_cpu_possible(cpuid, true); cpuid 39 arch/s390/boot/als.c struct cpuid id; cpuid 720 arch/s390/include/asm/kvm_host.h u64 cpuid; cpuid 77 arch/s390/include/asm/processor.h static inline void get_cpu_id(struct cpuid *ptr) cpuid 24 arch/s390/include/uapi/asm/debug.h unsigned long long cpuid:8; cpuid 99 arch/s390/include/uapi/asm/kvm.h __u64 cpuid; cpuid 108 arch/s390/include/uapi/asm/kvm.h __u64 cpuid; cpuid 849 arch/s390/kernel/debug.c active->id.fields.cpuid = smp_processor_id(); cpuid 1460 arch/s390/kernel/debug.c entry->id.fields.cpuid, (void *)caller); cpuid 110 arch/s390/kernel/perf_cpum_cf_diag.c struct cpuid cpuid; cpuid 115 arch/s390/kernel/perf_cpum_cf_diag.c get_cpu_id(&cpuid); /* Machine type */ cpuid 116 arch/s390/kernel/perf_cpum_cf_diag.c te->mach_type = cpuid.machine; cpuid 579 arch/s390/kernel/perf_cpum_cf_events.c struct cpuid cpu_id; cpuid 31 arch/s390/kernel/processor.c struct cpuid cpu_id; cpuid 83 
arch/s390/kernel/processor.c struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id); cpuid 144 arch/s390/kernel/processor.c struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu); cpuid 862 arch/s390/kernel/setup.c struct cpuid cpu_id; cpuid 1256 arch/s390/kvm/kvm-s390.c kvm->arch.model.cpuid = proc->cpuid; cpuid 1271 arch/s390/kvm/kvm-s390.c kvm->arch.model.cpuid); cpuid 1416 arch/s390/kvm/kvm-s390.c proc->cpuid = kvm->arch.model.cpuid; cpuid 1422 arch/s390/kvm/kvm-s390.c kvm->arch.model.cpuid); cpuid 1444 arch/s390/kvm/kvm-s390.c get_cpu_id((struct cpuid *) &mach->cpuid); cpuid 1452 arch/s390/kvm/kvm-s390.c kvm->arch.model.cpuid); cpuid 2367 arch/s390/kvm/kvm-s390.c struct cpuid cpuid; cpuid 2369 arch/s390/kvm/kvm-s390.c get_cpu_id(&cpuid); cpuid 2370 arch/s390/kvm/kvm-s390.c cpuid.version = 0xff; cpuid 2371 arch/s390/kvm/kvm-s390.c return *((u64 *) &cpuid); cpuid 2477 arch/s390/kvm/kvm-s390.c kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); cpuid 783 arch/s390/kvm/priv.c u64 stidp_data = vcpu->kvm->arch.model.cpuid; cpuid 258 arch/sparc/include/asm/hypervisor.h unsigned long sun4v_cpu_start(unsigned long cpuid, cpuid 286 arch/sparc/include/asm/hypervisor.h unsigned long sun4v_cpu_stop(unsigned long cpuid); cpuid 321 arch/sparc/include/asm/hypervisor.h unsigned long sun4v_cpu_poke(unsigned long cpuid); cpuid 453 arch/sparc/include/asm/hypervisor.h long sun4v_cpu_state(unsigned long cpuid); cpuid 1727 arch/sparc/include/asm/hypervisor.h unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid); cpuid 1831 arch/sparc/include/asm/hypervisor.h unsigned long *cpuid); cpuid 1834 arch/sparc/include/asm/hypervisor.h unsigned long cpuid); cpuid 197 arch/sparc/include/asm/obio.h static inline unsigned int cc_get_imsk_other(int cpuid) cpuid 203 arch/sparc/include/asm/obio.h "r" (ECSR_BASE(cpuid) | CC_IMSK), cpuid 208 arch/sparc/include/asm/obio.h static inline void cc_set_imsk_other(int cpuid, unsigned int mask) cpuid 212 arch/sparc/include/asm/obio.h "r" (ECSR_BASE(cpuid) | CC_IMSK), cpuid 116 arch/sparc/include/asm/oplib_64.h void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg); cpuid 119 arch/sparc/include/asm/oplib_64.h void prom_stopcpu_cpuid(int cpuid); cpuid 32 arch/sparc/include/asm/prom.h struct device_node *of_find_node_by_cpuid(int cpuid); cpuid 313 arch/sparc/kernel/irq_64.c static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) cpuid 318 arch/sparc/kernel/irq_64.c tid = starfire_translate(imap, cpuid); cpuid 328 arch/sparc/kernel/irq_64.c tid = cpuid << IMAP_TID_SHIFT; cpuid 331 arch/sparc/kernel/irq_64.c unsigned int a = cpuid & 0x1f; cpuid 332 arch/sparc/kernel/irq_64.c unsigned int n = (cpuid >> 5) & 0x1f; cpuid 340 arch/sparc/kernel/irq_64.c tid = cpuid << IMAP_TID_SHIFT; cpuid 352 arch/sparc/kernel/irq_64.c int cpuid; cpuid 356 arch/sparc/kernel/irq_64.c cpuid = map_to_cpu(irq); cpuid 361 arch/sparc/kernel/irq_64.c cpuid = cpumask_empty(&tmp) ? 
map_to_cpu(irq) : cpumask_first(&tmp); cpuid 364 arch/sparc/kernel/irq_64.c return cpuid; cpuid 377 arch/sparc/kernel/irq_64.c unsigned long cpuid, imap, val; cpuid 380 arch/sparc/kernel/irq_64.c cpuid = irq_choose_cpu(data->irq, cpuid 384 arch/sparc/kernel/irq_64.c tid = sun4u_compute_tid(imap, cpuid); cpuid 402 arch/sparc/kernel/irq_64.c unsigned long cpuid, imap, val; cpuid 405 arch/sparc/kernel/irq_64.c cpuid = irq_choose_cpu(data->irq, mask); cpuid 408 arch/sparc/kernel/irq_64.c tid = sun4u_compute_tid(imap, cpuid); cpuid 453 arch/sparc/kernel/irq_64.c unsigned long cpuid = irq_choose_cpu(data->irq, cpuid 458 arch/sparc/kernel/irq_64.c err = sun4v_intr_settarget(ino, cpuid); cpuid 461 arch/sparc/kernel/irq_64.c "err(%d)\n", ino, cpuid, err); cpuid 475 arch/sparc/kernel/irq_64.c unsigned long cpuid = irq_choose_cpu(data->irq, mask); cpuid 479 arch/sparc/kernel/irq_64.c err = sun4v_intr_settarget(ino, cpuid); cpuid 482 arch/sparc/kernel/irq_64.c "err(%d)\n", ino, cpuid, err); cpuid 513 arch/sparc/kernel/irq_64.c unsigned long cpuid; cpuid 516 arch/sparc/kernel/irq_64.c cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data)); cpuid 518 arch/sparc/kernel/irq_64.c err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); cpuid 522 arch/sparc/kernel/irq_64.c dev_handle, dev_ino, cpuid, err); cpuid 542 arch/sparc/kernel/irq_64.c unsigned long cpuid; cpuid 545 arch/sparc/kernel/irq_64.c cpuid = irq_choose_cpu(data->irq, mask); cpuid 547 arch/sparc/kernel/irq_64.c err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); cpuid 551 arch/sparc/kernel/irq_64.c dev_handle, dev_ino, cpuid, err); cpuid 80 arch/sparc/kernel/leon_smp.c int cpuid = hard_smp_processor_id(); cpuid 87 arch/sparc/kernel/leon_smp.c do_swap(&cpu_callin_map[cpuid], 1); cpuid 93 arch/sparc/kernel/leon_smp.c __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(&current_set[cpuid]) cpuid 100 arch/sparc/kernel/leon_smp.c while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) cpuid 1114 arch/sparc/kernel/mdesc.c int cpuid = *id; cpuid 1117 arch/sparc/kernel/mdesc.c if (cpuid >= NR_CPUS) { cpuid 1120 arch/sparc/kernel/mdesc.c cpuid, NR_CPUS); cpuid 1123 arch/sparc/kernel/mdesc.c if (!cpumask_test_cpu(cpuid, mask)) cpuid 1127 arch/sparc/kernel/mdesc.c ret = func(hp, mp, cpuid, arg); cpuid 1136 arch/sparc/kernel/mdesc.c static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, cpuid 1141 arch/sparc/kernel/mdesc.c set_cpu_present(cpuid, true); cpuid 1155 arch/sparc/kernel/mdesc.c static void * __init check_one_pgsz(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) cpuid 1179 arch/sparc/kernel/mdesc.c static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, cpuid 1192 arch/sparc/kernel/mdesc.c if (cpuid != real_hard_smp_processor_id()) cpuid 1194 arch/sparc/kernel/mdesc.c cpuid = 0; cpuid 1197 arch/sparc/kernel/mdesc.c c = &cpu_data(cpuid); cpuid 1200 arch/sparc/kernel/mdesc.c tb = &trap_block[cpuid]; cpuid 441 arch/sparc/kernel/prom_64.c int cpuid = of_getintprop_default(dp, mid_prop, -1); cpuid 445 arch/sparc/kernel/prom_64.c if (cpuid < 0) { cpuid 447 arch/sparc/kernel/prom_64.c cpuid = of_getintprop_default(dp, this_mid_prop, -1); cpuid 449 arch/sparc/kernel/prom_64.c if (cpuid < 0) { cpuid 455 arch/sparc/kernel/prom_64.c if (cpuid >= NR_CPUS) { cpuid 458 arch/sparc/kernel/prom_64.c cpuid, NR_CPUS); cpuid 462 arch/sparc/kernel/prom_64.c ret = func(dp, cpuid, arg); cpuid 469 arch/sparc/kernel/prom_64.c static void *check_cpu_node(struct device_node *dp, int cpuid, int id) cpuid 471 
arch/sparc/kernel/prom_64.c if (id == cpuid) cpuid 476 arch/sparc/kernel/prom_64.c struct device_node *of_find_node_by_cpuid(int cpuid) cpuid 478 arch/sparc/kernel/prom_64.c return of_iterate_over_cpus(check_cpu_node, cpuid); cpuid 481 arch/sparc/kernel/prom_64.c static void *record_one_cpu(struct device_node *dp, int cpuid, int arg) cpuid 485 arch/sparc/kernel/prom_64.c set_cpu_present(cpuid, true); cpuid 486 arch/sparc/kernel/prom_64.c set_cpu_possible(cpuid, true); cpuid 500 arch/sparc/kernel/prom_64.c static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg) cpuid 525 arch/sparc/kernel/prom_64.c if (cpuid != real_hard_smp_processor_id()) cpuid 527 arch/sparc/kernel/prom_64.c cpuid = 0; cpuid 530 arch/sparc/kernel/prom_64.c cpu_data(cpuid).clock_tick = cpuid 534 arch/sparc/kernel/prom_64.c cpu_data(cpuid).dcache_size = cpuid 537 arch/sparc/kernel/prom_64.c cpu_data(cpuid).dcache_line_size = cpuid 540 arch/sparc/kernel/prom_64.c cpu_data(cpuid).icache_size = cpuid 543 arch/sparc/kernel/prom_64.c cpu_data(cpuid).icache_line_size = cpuid 546 arch/sparc/kernel/prom_64.c cpu_data(cpuid).ecache_size = cpuid 548 arch/sparc/kernel/prom_64.c cpu_data(cpuid).ecache_line_size = cpuid 550 arch/sparc/kernel/prom_64.c if (!cpu_data(cpuid).ecache_size || cpuid 551 arch/sparc/kernel/prom_64.c !cpu_data(cpuid).ecache_line_size) { cpuid 552 arch/sparc/kernel/prom_64.c cpu_data(cpuid).ecache_size = cpuid 556 arch/sparc/kernel/prom_64.c cpu_data(cpuid).ecache_line_size = cpuid 561 arch/sparc/kernel/prom_64.c cpu_data(cpuid).core_id = portid + 1; cpuid 562 arch/sparc/kernel/prom_64.c cpu_data(cpuid).proc_id = portid; cpuid 564 arch/sparc/kernel/prom_64.c cpu_data(cpuid).dcache_size = cpuid 566 arch/sparc/kernel/prom_64.c cpu_data(cpuid).dcache_line_size = cpuid 569 arch/sparc/kernel/prom_64.c cpu_data(cpuid).icache_size = cpuid 571 arch/sparc/kernel/prom_64.c cpu_data(cpuid).icache_line_size = cpuid 574 arch/sparc/kernel/prom_64.c cpu_data(cpuid).ecache_size = cpuid 577 arch/sparc/kernel/prom_64.c cpu_data(cpuid).ecache_line_size = cpuid 580 arch/sparc/kernel/prom_64.c cpu_data(cpuid).core_id = 0; cpuid 581 arch/sparc/kernel/prom_64.c cpu_data(cpuid).proc_id = -1; cpuid 186 arch/sparc/kernel/smp_32.c int i, cpuid, extra; cpuid 191 arch/sparc/kernel/smp_32.c for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) { cpuid 192 arch/sparc/kernel/smp_32.c if (cpuid >= NR_CPUS) cpuid 246 arch/sparc/kernel/smp_32.c int cpuid = hard_smp_processor_id(); cpuid 248 arch/sparc/kernel/smp_32.c if (cpuid >= NR_CPUS) { cpuid 252 arch/sparc/kernel/smp_32.c if (cpuid != 0) cpuid 255 arch/sparc/kernel/smp_32.c current_thread_info()->cpu = cpuid; cpuid 256 arch/sparc/kernel/smp_32.c set_cpu_online(cpuid, true); cpuid 257 arch/sparc/kernel/smp_32.c set_cpu_possible(cpuid, true); cpuid 318 arch/sparc/kernel/smp_32.c unsigned int cpuid = hard_smp_processor_id(); cpuid 320 arch/sparc/kernel/smp_32.c register_percpu_ce(cpuid); cpuid 323 arch/sparc/kernel/smp_32.c smp_store_cpu_info(cpuid); cpuid 106 arch/sparc/kernel/smp_64.c int cpuid = hard_smp_processor_id(); cpuid 108 arch/sparc/kernel/smp_64.c __local_per_cpu_offset = __per_cpu_offset(cpuid); cpuid 134 arch/sparc/kernel/smp_64.c notify_cpu_starting(cpuid); cpuid 136 arch/sparc/kernel/smp_64.c while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) cpuid 139 arch/sparc/kernel/smp_64.c set_cpu_online(cpuid, true); cpuid 629 arch/sparc/kernel/smp_64.c #define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid]) cpuid 33 arch/sparc/kernel/sun4d_irq.c unsigned int 
cpuid; /* target cpu */ cpuid 195 arch/sparc/kernel/sun4d_irq.c int cpuid = handler_data->cpuid; cpuid 201 arch/sparc/kernel/sun4d_irq.c cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | (1 << real_irq)); cpuid 213 arch/sparc/kernel/sun4d_irq.c int cpuid = handler_data->cpuid; cpuid 220 arch/sparc/kernel/sun4d_irq.c cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq)); cpuid 254 arch/sparc/kernel/sun4d_irq.c int cpuid = cpu_logical_map(1); cpuid 256 arch/sparc/kernel/sun4d_irq.c if (cpuid == -1) cpuid 257 arch/sparc/kernel/sun4d_irq.c cpuid = cpu_logical_map(0); cpuid 261 arch/sparc/kernel/sun4d_irq.c board_to_cpu[board] = cpuid; cpuid 262 arch/sparc/kernel/sun4d_irq.c set_sbi_tid(devid, cpuid << 3); cpuid 264 arch/sparc/kernel/sun4d_irq.c printk(KERN_ERR "All sbus IRQs directed to CPU%d\n", cpuid); cpuid 312 arch/sparc/kernel/sun4d_irq.c handler_data->cpuid = board_to_cpu[board]; cpuid 45 arch/sparc/kernel/sun4d_smp.c static inline void show_leds(int cpuid) cpuid 47 arch/sparc/kernel/sun4d_smp.c cpuid &= 0x1e; cpuid 49 arch/sparc/kernel/sun4d_smp.c "r" ((cpu_leds[cpuid] << 4) | cpu_leds[cpuid+1]), cpuid 50 arch/sparc/kernel/sun4d_smp.c "r" (ECSR_BASE(cpuid) | BB_LEDS), cpuid 56 arch/sparc/kernel/sun4d_smp.c int cpuid = hard_smp_processor_id(); cpuid 59 arch/sparc/kernel/sun4d_smp.c cpu_leds[cpuid] = 0x6; cpuid 60 arch/sparc/kernel/sun4d_smp.c show_leds(cpuid); cpuid 69 arch/sparc/kernel/sun4d_smp.c int cpuid; cpuid 71 arch/sparc/kernel/sun4d_smp.c cpuid = hard_smp_processor_id(); cpuid 78 arch/sparc/kernel/sun4d_smp.c sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1); cpuid 82 arch/sparc/kernel/sun4d_smp.c while ((unsigned long)current_set[cpuid] < PAGE_OFFSET) cpuid 85 arch/sparc/kernel/sun4d_smp.c while (current_set[cpuid]->cpu != cpuid) cpuid 90 arch/sparc/kernel/sun4d_smp.c : : "r" (&current_set[cpuid]) cpuid 93 arch/sparc/kernel/sun4d_smp.c cpu_leds[cpuid] = 0x9; cpuid 94 arch/sparc/kernel/sun4d_smp.c show_leds(cpuid); cpuid 103 arch/sparc/kernel/sun4d_smp.c while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) cpuid 44 arch/sparc/kernel/sun4m_smp.c int cpuid = hard_smp_processor_id(); cpuid 51 arch/sparc/kernel/sun4m_smp.c swap_ulong(&cpu_callin_map[cpuid], 1); cpuid 59 arch/sparc/kernel/sun4m_smp.c : : "r" (&current_set[cpuid]) cpuid 66 arch/sparc/kernel/sun4m_smp.c while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) cpuid 166 arch/sparc/kernel/time_64.c static unsigned long cpuid_to_freq(phandle node, int cpuid) cpuid 179 arch/sparc/kernel/time_64.c if (is_cpu_node && (prom_getint(node, "upa-portid") == cpuid || cpuid 180 arch/sparc/kernel/time_64.c prom_getint(node, "cpuid") == cpuid)) cpuid 183 arch/sparc/kernel/time_64.c freq = cpuid_to_freq(prom_getchild(node), cpuid); cpuid 185 arch/sparc/kernel/time_64.c freq = cpuid_to_freq(prom_getsibling(node), cpuid); cpuid 393 arch/sparc/prom/misc_64.c void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg) cpuid 400 arch/sparc/prom/misc_64.c args[3] = (unsigned int) cpuid; cpuid 406 arch/sparc/prom/misc_64.c void prom_stopcpu_cpuid(int cpuid) cpuid 413 arch/sparc/prom/misc_64.c args[3] = (unsigned int) cpuid; cpuid 101 arch/x86/boot/cpuflags.c cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2], cpuid 106 arch/x86/boot/cpuflags.c cpuid(0x1, &tfms, &ignored, &cpu.flags[4], cpuid 120 arch/x86/boot/cpuflags.c cpuid(0x80000000, &max_amd_level, &ignored, &ignored, cpuid 125 arch/x86/boot/cpuflags.c cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6], cpuid 401 arch/x86/events/amd/uncore.c 
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); cpuid 4546 arch/x86/events/intel/core.c cpuid(10, &eax.full, &ebx.full, &unused, &edx.full); cpuid 199 arch/x86/events/intel/pt.c cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx); cpuid 61 arch/x86/include/asm/alternative.h u16 cpuid; /* cpuid bit set for replacement */ cpuid 91 arch/x86/include/asm/paravirt.h PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx); cpuid 148 arch/x86/include/asm/paravirt_types.h void (*cpuid)(unsigned int *eax, unsigned int *ebx, cpuid 615 arch/x86/include/asm/processor.h cpuid(op, &eax, &ebx, &ecx, &edx); cpuid 624 arch/x86/include/asm/processor.h cpuid(op, &eax, &ebx, &ecx, &edx); cpuid 633 arch/x86/include/asm/processor.h cpuid(op, &eax, &ebx, &ecx, &edx); cpuid 642 arch/x86/include/asm/processor.h cpuid(op, &eax, &ebx, &ecx, &edx); cpuid 949 arch/x86/include/asm/processor.h cpuid(base, &eax, &signature[0], &signature[1], &signature[2]); cpuid 384 arch/x86/include/asm/xen/interface.h #define XEN_CPUID XEN_EMULATE_PREFIX cpuid cpuid 25 arch/x86/include/uapi/asm/mce.h __u32 cpuid; /* CPUID 1 EAX */ cpuid 108 arch/x86/kernel/acpi/cstate.c cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); cpuid 392 arch/x86/kernel/alternative.c BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32); cpuid 393 arch/x86/kernel/alternative.c if (!boot_cpu_has(a->cpuid)) { cpuid 401 arch/x86/kernel/alternative.c a->cpuid >> 5, cpuid 402 arch/x86/kernel/alternative.c a->cpuid & 0x1f, cpuid 317 arch/x86/kernel/apic/ipi.c int apicid, cpuid; cpuid 326 arch/x86/kernel/apic/ipi.c cpuid = convert_apicid_to_cpu(apicid); cpuid 328 arch/x86/kernel/apic/ipi.c return cpuid >= 0 ? cpuid : 0; cpuid 197 arch/x86/kernel/apic/x2apic_uv_x.c cpuid(0, &eax, &ebx, &ecx, &edx); cpuid 346 arch/x86/kernel/cpu/amd.c cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); cpuid 529 arch/x86/kernel/cpu/amd.c u32 cpuid, assoc; cpuid 531 arch/x86/kernel/cpu/amd.c cpuid = cpuid_edx(0x80000005); cpuid 532 arch/x86/kernel/cpu/amd.c assoc = cpuid >> 16 & 0xff; cpuid 533 arch/x86/kernel/cpu/amd.c upperbit = ((cpuid >> 24) << 10) / assoc; cpuid 1021 arch/x86/kernel/cpu/amd.c cpuid(0x80000006, &eax, &ebx, &ecx, &edx); cpuid 1031 arch/x86/kernel/cpu/amd.c cpuid(0x80000005, &eax, &ebx, &ecx, &edx); cpuid 1050 arch/x86/kernel/cpu/amd.c cpuid(0x80000005, &eax, &ebx, &ecx, &edx); cpuid 245 arch/x86/kernel/cpu/cacheinfo.c cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val); cpuid 246 arch/x86/kernel/cpu/cacheinfo.c cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val); cpuid 795 arch/x86/kernel/cpu/cacheinfo.c cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); cpuid 238 arch/x86/kernel/cpu/centaur.c cpuid(0x80000005, &aa, &bb, &cc, &dd); cpuid 632 arch/x86/kernel/cpu/common.c cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); cpuid 633 arch/x86/kernel/cpu/common.c cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); cpuid 634 arch/x86/kernel/cpu/common.c cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); cpuid 674 arch/x86/kernel/cpu/common.c cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); cpuid 685 arch/x86/kernel/cpu/common.c cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); cpuid 742 arch/x86/kernel/cpu/common.c cpuid(1, &eax, &ebx, &ecx, &edx); cpuid 802 arch/x86/kernel/cpu/common.c cpuid(0x00000000, (unsigned int *)&c->cpuid_level, cpuid 812 arch/x86/kernel/cpu/common.c cpuid(0x00000001, &tfms, &misc, &junk, &cap0); cpuid 905 arch/x86/kernel/cpu/common.c cpuid(0x00000001, &eax, &ebx, &ecx, &edx); cpuid 942 arch/x86/kernel/cpu/common.c cpuid(0x80000001, &eax, &ebx, &ecx, &edx); cpuid 950 arch/x86/kernel/cpu/common.c cpuid(0x80000007, &eax, 
&ebx, &ecx, &edx); cpuid 957 arch/x86/kernel/cpu/common.c cpuid(0x80000008, &eax, &ebx, &ecx, &edx); cpuid 981 arch/x86/kernel/cpu/common.c cpuid(0x80000008, &eax, &ebx, &ecx, &edx); cpuid 75 arch/x86/kernel/cpu/hygon.c cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); cpuid 364 arch/x86/kernel/cpu/hygon.c cpuid(0x80000006, &eax, &ebx, &ecx, &edx); cpuid 380 arch/x86/kernel/cpu/hygon.c cpuid(0x80000005, &eax, &ebx, &ecx, &edx); cpuid 323 arch/x86/kernel/cpu/intel.c cpuid(0x00000001, &eax, &ebx, &ecx, &edx); cpuid 955 arch/x86/kernel/cpu/intel.c cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); cpuid 140 arch/x86/kernel/cpu/mce/core.c m->cpuid = cpuid_eax(1); cpuid 280 arch/x86/kernel/cpu/mce/core.c m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, cpuid 103 arch/x86/kernel/cpu/mce/inject.c m->cpuid = cpuid_eax(1); cpuid 157 arch/x86/kernel/cpu/mshyperv.c cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, cpuid 37 arch/x86/kernel/cpu/transmeta.c cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags); cpuid 48 arch/x86/kernel/cpu/transmeta.c cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy); cpuid 61 arch/x86/kernel/cpu/transmeta.c cpuid(0x80860003, cpuid 66 arch/x86/kernel/cpu/transmeta.c cpuid(0x80860004, cpuid 71 arch/x86/kernel/cpu/transmeta.c cpuid(0x80860005, cpuid 76 arch/x86/kernel/cpu/transmeta.c cpuid(0x80860006, cpuid 220 arch/x86/kernel/cpu/vmware.c cpuid(CPUID_VMWARE_FEATURES_LEAF, &eax, &ebx, &ecx, &edx); cpuid 238 arch/x86/kernel/cpu/vmware.c cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0], cpuid 80 arch/x86/kernel/cpu/zhaoxin.c cpuid(0x00000001, &eax, &ebx, &ecx, &edx); cpuid 308 arch/x86/kernel/paravirt.c .cpu.cpuid = native_cpuid, cpuid 156 arch/x86/kernel/smpboot.c int cpuid; cpuid 164 arch/x86/kernel/smpboot.c cpuid = smp_processor_id(); cpuid 178 arch/x86/kernel/smpboot.c smp_store_cpu_info(cpuid); cpuid 193 arch/x86/kernel/smpboot.c cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy; cpuid 194 arch/x86/kernel/smpboot.c pr_debug("Stack at about %p\n", &cpuid); cpuid 198 arch/x86/kernel/smpboot.c notify_cpu_starting(cpuid); cpuid 203 arch/x86/kernel/smpboot.c cpumask_set_cpu(cpuid, cpu_callin_mask); cpuid 628 arch/x86/kernel/tsc.c cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx); cpuid 660 arch/x86/kernel/tsc.c cpuid(0x16, &eax_base_mhz, &ebx, &ecx, &edx); cpuid 700 arch/x86/kernel/tsc.c cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx); cpuid 1049 arch/x86/kernel/tsc.c cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator, cpuid 199 arch/x86/kvm/cpuid.c struct kvm_cpuid *cpuid, cpuid 206 arch/x86/kvm/cpuid.c if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) cpuid 209 arch/x86/kvm/cpuid.c if (cpuid->nent) { cpuid 212 arch/x86/kvm/cpuid.c cpuid->nent)); cpuid 217 arch/x86/kvm/cpuid.c cpuid->nent * sizeof(struct kvm_cpuid_entry))) cpuid 220 arch/x86/kvm/cpuid.c for (i = 0; i < cpuid->nent; i++) { cpuid 232 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_nent = cpuid->nent; cpuid 244 arch/x86/kvm/cpuid.c struct kvm_cpuid2 *cpuid, cpuid 250 arch/x86/kvm/cpuid.c if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) cpuid 254 arch/x86/kvm/cpuid.c cpuid->nent * sizeof(struct kvm_cpuid_entry2))) cpuid 256 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_nent = cpuid->nent; cpuid 265 arch/x86/kvm/cpuid.c struct kvm_cpuid2 *cpuid, cpuid 271 arch/x86/kvm/cpuid.c if (cpuid->nent < vcpu->arch.cpuid_nent) cpuid 280 arch/x86/kvm/cpuid.c cpuid->nent = vcpu->arch.cpuid_nent; cpuid 865 arch/x86/kvm/cpuid.c int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, cpuid 879 arch/x86/kvm/cpuid.c if (cpuid->nent < 1) 
cpuid 881 arch/x86/kvm/cpuid.c if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) cpuid 882 arch/x86/kvm/cpuid.c cpuid->nent = KVM_MAX_CPUID_ENTRIES; cpuid 884 arch/x86/kvm/cpuid.c if (sanity_check_entries(entries, cpuid->nent, type)) cpuid 889 arch/x86/kvm/cpuid.c cpuid->nent)); cpuid 901 arch/x86/kvm/cpuid.c &nent, cpuid->nent, type); cpuid 907 arch/x86/kvm/cpuid.c for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func) cpuid 909 arch/x86/kvm/cpuid.c &nent, cpuid->nent, type); cpuid 919 arch/x86/kvm/cpuid.c cpuid->nent = nent; cpuid 13 arch/x86/kvm/cpuid.h int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, cpuid 17 arch/x86/kvm/cpuid.h struct kvm_cpuid *cpuid, cpuid 20 arch/x86/kvm/cpuid.h struct kvm_cpuid2 *cpuid, cpuid 23 arch/x86/kvm/cpuid.h struct kvm_cpuid2 *cpuid, cpuid 71 arch/x86/kvm/cpuid.h const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature); cpuid 73 arch/x86/kvm/cpuid.h entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index); cpuid 77 arch/x86/kvm/cpuid.h switch (cpuid.reg) { cpuid 4879 arch/x86/kvm/emulate.c II(ImplicitOps, em_cpuid, cpuid), cpuid 1786 arch/x86/kvm/hyperv.c int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, cpuid 1808 arch/x86/kvm/hyperv.c if (cpuid->nent < nent) cpuid 1811 arch/x86/kvm/hyperv.c if (cpuid->nent > nent) cpuid 1812 arch/x86/kvm/hyperv.c cpuid->nent = nent; cpuid 97 arch/x86/kvm/hyperv.h int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, cpuid 5199 arch/x86/kvm/svm.c int cpuid = vcpu->cpu; cpuid 5201 arch/x86/kvm/svm.c if (cpuid != get_cpu()) cpuid 5202 arch/x86/kvm/svm.c wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid)); cpuid 5994 arch/x86/kvm/svm.c cpuid(0x8000001f, &entry->eax, &entry->ebx, cpuid 3426 arch/x86/kvm/x86.c struct kvm_cpuid2 cpuid; cpuid 3429 arch/x86/kvm/x86.c if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) cpuid 3432 arch/x86/kvm/x86.c r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, cpuid 3438 arch/x86/kvm/x86.c if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) cpuid 4241 arch/x86/kvm/x86.c struct kvm_cpuid cpuid; cpuid 4244 arch/x86/kvm/x86.c if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) cpuid 4246 arch/x86/kvm/x86.c r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); cpuid 4251 arch/x86/kvm/x86.c struct kvm_cpuid2 cpuid; cpuid 4254 arch/x86/kvm/x86.c if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) cpuid 4256 arch/x86/kvm/x86.c r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, cpuid 4262 arch/x86/kvm/x86.c struct kvm_cpuid2 cpuid; cpuid 4265 arch/x86/kvm/x86.c if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) cpuid 4267 arch/x86/kvm/x86.c r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, cpuid 4272 arch/x86/kvm/x86.c if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) cpuid 4527 arch/x86/kvm/x86.c struct kvm_cpuid2 cpuid; cpuid 4530 arch/x86/kvm/x86.c if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid))) cpuid 4533 arch/x86/kvm/x86.c r = kvm_vcpu_ioctl_get_hv_cpuid(vcpu, &cpuid, cpuid 4539 arch/x86/kvm/x86.c if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid))) cpuid 114 arch/x86/xen/enlighten_hvm.c cpuid(base + 4, &eax, &ebx, &ecx, &edx); cpuid 1015 arch/x86/xen/enlighten_pv.c .cpuid = xen_cpuid, cpuid 103 arch/x86/xen/pmu.c cpuid(0xa, &eax, &ebx, &ecx, &edx); cpuid 46 drivers/acpi/acpi_pad.c cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); cpuid 24 drivers/acpi/processor_pdc.c int cpuid, type; cpuid 54 drivers/acpi/processor_pdc.c cpuid = acpi_get_cpuid(handle, type, acpi_id); cpuid 56 
drivers/acpi/processor_pdc.c return !invalid_logical_cpuid(cpuid); cpuid 451 drivers/base/arch_topology.c void update_siblings_masks(unsigned int cpuid) cpuid 453 drivers/base/arch_topology.c struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; cpuid 462 drivers/base/arch_topology.c cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); cpuid 468 drivers/base/arch_topology.c cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); cpuid 474 drivers/base/arch_topology.c cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); cpuid 41 drivers/base/cpu.c int cpuid = cpu->dev.id; cpuid 42 drivers/base/cpu.c unregister_cpu_under_node(cpuid, from_nid); cpuid 43 drivers/base/cpu.c register_cpu_under_node(cpuid, to_nid); cpuid 50 drivers/base/cpu.c int cpuid = dev->id; cpuid 54 drivers/base/cpu.c from_nid = cpu_to_node(cpuid); cpuid 58 drivers/base/cpu.c ret = cpu_up(cpuid); cpuid 63 drivers/base/cpu.c to_nid = cpu_to_node(cpuid); cpuid 85 drivers/clocksource/timer-riscv.c int cpuid, hartid, error; cpuid 94 drivers/clocksource/timer-riscv.c cpuid = riscv_hartid_to_cpuid(hartid); cpuid 95 drivers/clocksource/timer-riscv.c if (cpuid < 0) { cpuid 97 drivers/clocksource/timer-riscv.c return cpuid; cpuid 100 drivers/clocksource/timer-riscv.c if (cpuid != smp_processor_id()) cpuid 104 drivers/clocksource/timer-riscv.c __func__, cpuid, hartid); cpuid 108 drivers/clocksource/timer-riscv.c error, cpuid); cpuid 177 drivers/cpufreq/acpi-cpufreq.c static int check_est_cpu(unsigned int cpuid) cpuid 179 drivers/cpufreq/acpi-cpufreq.c struct cpuinfo_x86 *cpu = &cpu_data(cpuid); cpuid 184 drivers/cpufreq/acpi-cpufreq.c static int check_amd_hwpstate_cpu(unsigned int cpuid) cpuid 186 drivers/cpufreq/acpi-cpufreq.c struct cpuinfo_x86 *cpu = &cpu_data(cpuid); cpuid 143 drivers/cpufreq/longrun.c cpuid(0x80860007, &eax, &ebx, &ecx, &edx); cpuid 211 drivers/cpufreq/longrun.c cpuid(0x80860007, &eax, &ebx, &ecx, &edx); cpuid 222 drivers/cpufreq/longrun.c cpuid(0x80860007, &eax, &ebx, &ecx, &edx); cpuid 158 drivers/cpufreq/p4-clockmod.c int cpuid = 0; cpuid 166 drivers/cpufreq/p4-clockmod.c cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping; cpuid 167 drivers/cpufreq/p4-clockmod.c switch (cpuid) { cpuid 481 drivers/cpufreq/pmac64-cpufreq.c struct device_node *cpuid = NULL, *hwclock = NULL; cpuid 491 drivers/cpufreq/pmac64-cpufreq.c cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0"); cpuid 492 drivers/cpufreq/pmac64-cpufreq.c if (cpuid != NULL) cpuid 493 drivers/cpufreq/pmac64-cpufreq.c eeprom = of_get_property(cpuid, "cpuid", NULL); cpuid 641 drivers/cpufreq/pmac64-cpufreq.c of_node_put(cpuid); cpuid 50 drivers/cpufreq/powernow-k7.c u32 cpuid; cpuid 139 drivers/cpufreq/powernow-k7.c cpuid(0x80000007, &eax, &ebx, &ecx, &edx); cpuid 444 drivers/cpufreq/powernow-k7.c pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); cpuid 497 drivers/cpufreq/powernow-k7.c if ((etuple == pst->cpuid) && cpuid 482 drivers/cpufreq/powernow-k8.c cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); cpuid 670 drivers/cpufreq/powernow-k8.c if ((psb->cpuid == 0x00000fc0) || cpuid 671 drivers/cpufreq/powernow-k8.c (psb->cpuid == 0x00000fe0)) { cpuid 168 drivers/cpufreq/powernow-k8.h u32 cpuid; cpuid 200 drivers/cpufreq/speedstep-centrino.c #define _BANIAS(cpuid, max, name) \ cpuid 201 drivers/cpufreq/speedstep-centrino.c { .cpu_id = cpuid, \ cpuid 386 drivers/crypto/hisilicon/sec/sec_drv.h u32 cpuid; cpuid 841 drivers/edac/mce_amd.c unsigned int fam = x86_family(m->cpuid); cpuid 899 drivers/edac/mce_amd.c unsigned 
int fam = x86_family(m->cpuid); cpuid 1039 drivers/edac/mce_amd.c unsigned int fam = x86_family(m->cpuid); cpuid 1049 drivers/edac/mce_amd.c fam, x86_model(m->cpuid), x86_stepping(m->cpuid), cpuid 1427 drivers/edac/pnd2_edac.c mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid); cpuid 3182 drivers/edac/sb_edac.c "%u APIC %x\n", mce->cpuvendor, mce->cpuid, cpuid 613 drivers/edac/skx_common.c "%u APIC 0x%x\n", mce->cpuvendor, mce->cpuid, cpuid 269 drivers/firmware/efi/cper-x86.c print_hex_dump(pfx, "", DUMP_PREFIX_OFFSET, 16, 4, proc->cpuid, cpuid 270 drivers/firmware/efi/cper-x86.c sizeof(proc->cpuid), 0); cpuid 177 drivers/firmware/psci/psci.c static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) cpuid 183 drivers/firmware/psci/psci.c err = invoke_psci_fn(fn, cpuid, entry_point, 0); cpuid 187 drivers/firmware/psci/psci.c static int psci_migrate(unsigned long cpuid) cpuid 193 drivers/firmware/psci/psci.c err = invoke_psci_fn(fn, cpuid, 0, 0); cpuid 353 drivers/firmware/psci/psci.c unsigned long cpuid; cpuid 374 drivers/firmware/psci/psci.c cpuid = psci_migrate_info_up_cpu(); cpuid 375 drivers/firmware/psci/psci.c if (cpuid & ~MPIDR_HWID_BITMASK) { cpuid 377 drivers/firmware/psci/psci.c cpuid); cpuid 381 drivers/firmware/psci/psci.c cpu = get_logical_index(cpuid); cpuid 384 drivers/firmware/psci/psci.c pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); cpuid 480 drivers/gpu/drm/gma500/mmu.c cpuid(0x00000001, &tfms, &misc, &cap0, &cap4); cpuid 195 drivers/hwmon/ibmpowernv.c int cpuid = get_logical_cpu(id); cpuid 197 drivers/hwmon/ibmpowernv.c if (cpuid >= 0) cpuid 204 drivers/hwmon/ibmpowernv.c cpuid); cpuid 1128 drivers/idle/intel_idle.c cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); cpuid 1282 drivers/idle/intel_idle.c cpuid(7, &eax, &ebx, &ecx, &edx); cpuid 1118 drivers/infiniband/hw/hfi1/sdma.c unsigned long cpuid) cpuid 1123 drivers/infiniband/hw/hfi1/sdma.c rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid, cpuid 1128 drivers/infiniband/hw/hfi1/sdma.c seq_printf(s, "cpu%3lu: ", cpuid); cpuid 1072 drivers/infiniband/hw/hfi1/sdma.h unsigned long cpuid); cpuid 124 drivers/irqchip/irq-armada-370-xp.c #define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid) cpuid 399 drivers/irqchip/irq-armada-370-xp.c unsigned long cpuid = cpu_logical_map(smp_processor_id()); cpuid 402 drivers/irqchip/irq-armada-370-xp.c writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid), cpuid 509 drivers/irqchip/irq-armada-370-xp.c unsigned long irqmap, irqn, irqsrc, cpuid; cpuid 515 drivers/irqchip/irq-armada-370-xp.c cpuid = cpu_logical_map(smp_processor_id()); cpuid 524 drivers/irqchip/irq-armada-370-xp.c if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid))) cpuid 183 drivers/irqchip/irq-vf610-mscm-ir.c int ret, cpuid; cpuid 209 drivers/irqchip/irq-vf610-mscm-ir.c regmap_read(mscm_cp_regmap, MSCM_CPxNUM, &cpuid); cpuid 210 drivers/irqchip/irq-vf610-mscm-ir.c mscm_ir_data->cpu_mask = 0x1 << cpuid; cpuid 54 drivers/misc/sgi-xp/xp_main.c int (*xp_cpu_to_nasid) (int cpuid); cpuid 83 drivers/misc/sgi-xp/xp_uv.c xp_cpu_to_nasid_uv(int cpuid) cpuid 86 drivers/misc/sgi-xp/xp_uv.c return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid)); cpuid 306 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca) cpuid 310 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c HW_ATL_RDM_DCADCPUID_SHIFT, cpuid); cpuid 1169 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c void 
hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
cpuid 1173 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c HW_ATL_TDM_DCADCPUID_SHIFT, cpuid);
cpuid 146 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
cpuid 549 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h void hw_atl_tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
cpuid 681 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
cpuid 701 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
cpuid 21 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h u16 cpuid; /* bigger than needed, see above for reason */
cpuid 61 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h u8 cpuid;
cpuid 1111 drivers/net/ethernet/intel/ice/ice_common.c ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
cpuid 1171 drivers/net/ethernet/intel/ice/ice_common.c ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
cpuid 272 drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h u16 cpuid; /* bigger than needed, see above for reason */
cpuid 433 drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h u16 cpuid; /* bigger than needed, see above for reason */
cpuid 1137 drivers/parisc/sba_iommu.c if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
cpuid 190 drivers/perf/arm_pmu_acpi.c unsigned long cpuid = read_cpuid_id();
cpuid 196 drivers/perf/arm_pmu_acpi.c if (!pmu || pmu->acpi_cpuid != cpuid)
cpuid 209 drivers/perf/arm_pmu_acpi.c pmu->acpi_cpuid = cpuid;
cpuid 29 drivers/perf/arm_pmu_platform.c unsigned int cpuid = read_cpuid_id();
cpuid 35 drivers/perf/arm_pmu_platform.c if ((cpuid & info->mask) != info->cpuid)
cpuid 847 drivers/s390/cio/css.c struct cpuid cpu_id;
cpuid 3252 drivers/scsi/qla2xxx/qla_def.h int cpuid;
cpuid 3533 drivers/scsi/qla2xxx/qla_def.h uint16_t cpuid;
cpuid 269 drivers/scsi/qla2xxx/qla_inline.h qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
cpuid 271 drivers/scsi/qla2xxx/qla_inline.h qpair->cpuid = cpuid;
cpuid 277 drivers/scsi/qla2xxx/qla_inline.h h->cpuid = qpair->cpuid;
cpuid 2999 drivers/scsi/qla2xxx/qla_isr.c if (rsp->qpair->cpuid != smp_processor_id())
cpuid 2099 drivers/scsi/qla2xxx/qla_target.c mcmd->se_cmd.cpuid = h->cpuid;
cpuid 2108 drivers/scsi/qla2xxx/qla_target.c mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
cpuid 2115 drivers/scsi/qla2xxx/qla_target.c queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);
cpuid 4263 drivers/scsi/qla2xxx/qla_target.c cmd->se_cmd.cpuid = h->cpuid;
cpuid 4366 drivers/scsi/qla2xxx/qla_target.c queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
cpuid 4372 drivers/scsi/qla2xxx/qla_target.c queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
cpuid 4411 drivers/scsi/qla2xxx/qla_target.c mcmd->se_cmd.cpuid = h->cpuid;
cpuid 4423 drivers/scsi/qla2xxx/qla_target.c mcmd->se_cmd.cpuid = h->cpuid;
cpuid 4436 drivers/scsi/qla2xxx/qla_target.c queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
cpuid 6377 drivers/scsi/qla2xxx/qla_target.c h->cpuid = ha->base_qpair->cpuid;
cpuid 6392 drivers/scsi/qla2xxx/qla_target.c h->cpuid = qpair->cpuid;
cpuid 770 drivers/scsi/qla2xxx/qla_target.h u16 cpuid;
cpuid 436 drivers/scsi/qla2xxx/tcm_qla2xxx.c if (se_cmd->cpuid != WORK_CPU_UNBOUND)
cpuid 51 drivers/soc/tegra/flowctrl.c u32 flowctrl_read_cpu_csr(unsigned int cpuid)
cpuid 53 drivers/soc/tegra/flowctrl.c u8 offset = flowctrl_offset_cpu_csr[cpuid];
cpuid 62 drivers/soc/tegra/flowctrl.c void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
cpuid 64 drivers/soc/tegra/flowctrl.c return flowctrl_update(flowctrl_offset_cpu_csr[cpuid], value);
cpuid 67 drivers/soc/tegra/flowctrl.c void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value)
cpuid 69 drivers/soc/tegra/flowctrl.c return flowctrl_update(flowctrl_offset_halt_cpu[cpuid], value);
cpuid 72 drivers/soc/tegra/flowctrl.c void flowctrl_cpu_suspend_enter(unsigned int cpuid)
cpuid 77 drivers/soc/tegra/flowctrl.c reg = flowctrl_read_cpu_csr(cpuid);
cpuid 85 drivers/soc/tegra/flowctrl.c reg |= TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 << cpuid;
cpuid 95 drivers/soc/tegra/flowctrl.c reg |= TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 << cpuid;
cpuid 101 drivers/soc/tegra/flowctrl.c flowctrl_write_cpu_csr(cpuid, reg);
cpuid 104 drivers/soc/tegra/flowctrl.c if (i == cpuid)
cpuid 113 drivers/soc/tegra/flowctrl.c void flowctrl_cpu_suspend_exit(unsigned int cpuid)
cpuid 118 drivers/soc/tegra/flowctrl.c reg = flowctrl_read_cpu_csr(cpuid);
cpuid 138 drivers/soc/tegra/flowctrl.c flowctrl_write_cpu_csr(cpuid, reg);
cpuid 796 drivers/soc/tegra/pmc.c unsigned int cpuid)
cpuid 798 drivers/soc/tegra/pmc.c if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates)
cpuid 799 drivers/soc/tegra/pmc.c return pmc->soc->cpu_powergates[cpuid];
cpuid 808 drivers/soc/tegra/pmc.c bool tegra_pmc_cpu_is_powered(unsigned int cpuid)
cpuid 812 drivers/soc/tegra/pmc.c id = tegra_get_cpu_powergate_id(pmc, cpuid);
cpuid 823 drivers/soc/tegra/pmc.c int tegra_pmc_cpu_power_on(unsigned int cpuid)
cpuid 827 drivers/soc/tegra/pmc.c id = tegra_get_cpu_powergate_id(pmc, cpuid);
cpuid 838 drivers/soc/tegra/pmc.c int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
cpuid 842 drivers/soc/tegra/pmc.c id = tegra_get_cpu_powergate_id(pmc, cpuid);
cpuid 85 drivers/soc/ux500/ux500-soc-id.c unsigned int cpuid = read_cpuid_id();
cpuid 89 drivers/soc/ux500/ux500-soc-id.c switch (cpuid) {
cpuid 865 drivers/target/target_core_transport.c queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
cpuid 1583 drivers/target/target_core_transport.c se_cmd->cpuid = WORK_CPU_UNBOUND;
cpuid 190 drivers/thermal/intel/intel_powerclamp.c cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
cpuid 339 drivers/thermal/intel/x86_pkg_temp_thermal.c cpuid(6, &eax, &ebx, &ecx, &edx);
cpuid 1169 drivers/visorbus/visorchipset.c cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
cpuid 1658 drivers/visorbus/visorchipset.c cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
cpuid 78 drivers/xen/pcpu.c .u.cpu_ol.cpuid = cpu_id,
cpuid 89 drivers/xen/pcpu.c .u.cpu_ol.cpuid = cpu_id,
cpuid 732 drivers/xen/xenbus/xenbus_xs.c cpuid(base + 1, &eax, &ebx, &ecx, &edx);
cpuid 344 include/kvm/arm_vgic.h int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
cpuid 270 include/linux/acpi.h static inline bool invalid_logical_cpuid(u32 cpuid)
cpuid 272 include/linux/acpi.h return (int)cpuid < 0;
cpuid 55 include/linux/arch_topology.h void store_cpu_topology(unsigned int cpuid);
cpuid 58 include/linux/arch_topology.h void remove_cpu_topology(unsigned int cpuid);
cpuid 370 include/linux/cper.h u8 cpuid[48];
cpuid 132 include/linux/perf/arm_pmu.h unsigned int cpuid;
cpuid 139 include/linux/perf/arm_pmu.h .cpuid = (_cpuid), \
cpuid 36 include/linux/psci.h int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
cpuid 37 include/linux/psci.h int (*migrate)(unsigned long cpuid);
cpuid 32 include/linux/smp.h int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
cpuid 44 include/soc/tegra/flowctrl.h u32 flowctrl_read_cpu_csr(unsigned int cpuid);
cpuid 45 include/soc/tegra/flowctrl.h void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value);
cpuid 46 include/soc/tegra/flowctrl.h void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value);
cpuid 48 include/soc/tegra/flowctrl.h void flowctrl_cpu_suspend_enter(unsigned int cpuid);
cpuid 49 include/soc/tegra/flowctrl.h void flowctrl_cpu_suspend_exit(unsigned int cpuid);
cpuid 51 include/soc/tegra/flowctrl.h static inline u32 flowctrl_read_cpu_csr(unsigned int cpuid)
cpuid 56 include/soc/tegra/flowctrl.h static inline void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
cpuid 60 include/soc/tegra/flowctrl.h static inline void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value) {}
cpuid 62 include/soc/tegra/flowctrl.h static inline void flowctrl_cpu_suspend_enter(unsigned int cpuid)
cpuid 66 include/soc/tegra/flowctrl.h static inline void flowctrl_cpu_suspend_exit(unsigned int cpuid)
cpuid 20 include/soc/tegra/pmc.h bool tegra_pmc_cpu_is_powered(unsigned int cpuid);
cpuid 21 include/soc/tegra/pmc.h int tegra_pmc_cpu_power_on(unsigned int cpuid);
cpuid 22 include/soc/tegra/pmc.h int tegra_pmc_cpu_remove_clamping(unsigned int cpuid);
cpuid 538 include/target/target_core_base.h int cpuid;
cpuid 30 include/trace/events/mce.h __field( u32, cpuid )
cpuid 50 include/trace/events/mce.h __entry->cpuid = m->cpuid;
cpuid 66 include/trace/events/mce.h __entry->cpuvendor, __entry->cpuid,
cpuid 455 include/xen/interface/platform.h uint32_t cpuid;
cpuid 347 include/xen/interface/xen-mca.h __u32 cpuid; /* CPUID 1 EAX */
cpuid 99 tools/arch/s390/include/uapi/asm/kvm.h __u64 cpuid;
cpuid 108 tools/arch/s390/include/uapi/asm/kvm.h __u64 cpuid;
cpuid 4 tools/perf/arch/arc/annotate/instructions.c static int arc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
cpuid 30 tools/perf/arch/arm/annotate/instructions.c static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
cpuid 88 tools/perf/arch/arm64/annotate/instructions.c static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
cpuid 41 tools/perf/arch/csky/annotate/instructions.c static int csky__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
cpuid 52 tools/perf/arch/powerpc/annotate/instructions.c static int powerpc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
cpuid 165 tools/perf/arch/powerpc/util/kvm-stat.c int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
cpuid 139 tools/perf/arch/s390/annotate/instructions.c static int s390__cpuid_parse(struct arch *arch, char *cpuid)
cpuid 149 tools/perf/arch/s390/annotate/instructions.c ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%s", &family, model_c,
cpuid 160 tools/perf/arch/s390/annotate/instructions.c static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
cpuid 167 tools/perf/arch/s390/annotate/instructions.c if (cpuid) {
cpuid 168 tools/perf/arch/s390/annotate/instructions.c if (s390__cpuid_parse(arch, cpuid))
cpuid 102 tools/perf/arch/s390/util/kvm-stat.c int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
cpuid 104 tools/perf/arch/s390/util/kvm-stat.c if (strstr(cpuid, "IBM")) {
cpuid 160 tools/perf/arch/sparc/annotate/instructions.c static int sparc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
cpuid 174 tools/perf/arch/x86/annotate/instructions.c static int x86__cpuid_parse(struct arch *arch, char *cpuid)
cpuid 182 tools/perf/arch/x86/annotate/instructions.c ret = sscanf(cpuid, "%*[^,],%u,%u,%u", &family, &model, &stepping);
cpuid 192 tools/perf/arch/x86/annotate/instructions.c static int x86__annotate_init(struct arch *arch, char *cpuid)
cpuid 199 tools/perf/arch/x86/annotate/instructions.c if (cpuid) {
cpuid 200 tools/perf/arch/x86/annotate/instructions.c if (x86__cpuid_parse(arch, cpuid))
cpuid 34 tools/perf/arch/x86/util/header.c cpuid(0, &lvl, &b, &c, &d);
cpuid 41 tools/perf/arch/x86/util/header.c cpuid(1, &a, &b, &c, &d);
cpuid 156 tools/perf/arch/x86/util/kvm-stat.c int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
cpuid 158 tools/perf/arch/x86/util/kvm-stat.c if (strstr(cpuid, "Intel")) {
cpuid 161 tools/perf/arch/x86/util/kvm-stat.c } else if (strstr(cpuid, "AMD") || strstr(cpuid, "Hygon")) {
cpuid 708 tools/perf/builtin-kvm.c char buf[128], *cpuid;
cpuid 718 tools/perf/builtin-kvm.c cpuid = buf;
cpuid 720 tools/perf/builtin-kvm.c cpuid = kvm->session->header.env.cpuid;
cpuid 722 tools/perf/builtin-kvm.c if (!cpuid) {
cpuid 727 tools/perf/builtin-kvm.c err = cpu_isa_init(kvm, cpuid);
cpuid 729 tools/perf/builtin-kvm.c pr_err("CPU %s is not supported.\n", cpuid);
cpuid 786 tools/perf/pmu-events/jevents.c char *cpuid, *version, *type, *fname;
cpuid 805 tools/perf/pmu-events/jevents.c cpuid = fixregex(strtok_r(p, ",", &save));
cpuid 812 tools/perf/pmu-events/jevents.c fprintf(outfp, "\t.cpuid = \"%s\",\n", cpuid);
cpuid 32 tools/perf/pmu-events/pmu-events.h const char *cpuid;
cpuid 511 tools/perf/tests/code-reading.c char cpuid[128], model[16], model_c[16], cpum_cf_v[16];
cpuid 515 tools/perf/tests/code-reading.c if (get_cpuid(cpuid, sizeof(cpuid)))
cpuid 517 tools/perf/tests/code-reading.c ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%x", &family, model_c,
cpuid 86 tools/perf/util/annotate.c int (*init)(struct arch *arch, char *cpuid);
cpuid 2101 tools/perf/util/annotate.c err = arch->init(arch, env ? env->cpuid : NULL);
cpuid 175 tools/perf/util/env.c zfree(&env->cpuid);
cpuid 48 tools/perf/util/env.h char *cpuid;
cpuid 823 tools/perf/util/header.c int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
cpuid 835 tools/perf/util/header.c match = !regexec(&re, cpuid, 1, pmatch, 0);
cpuid 841 tools/perf/util/header.c if (match_len == strlen(cpuid))
cpuid 1739 tools/perf/util/header.c fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
cpuid 2054 tools/perf/util/header.c FEAT_PROCESS_STR_FUN(cpuid, cpuid);
cpuid 2850 tools/perf/util/header.c FEAT_OPR(CPUID, cpuid, false),
cpuid 937 tools/perf/util/intel-pt.c if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
cpuid 139 tools/perf/util/kvm-stat.h int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid);
cpuid 661 tools/perf/util/pmu.c char *cpuid;
cpuid 664 tools/perf/util/pmu.c cpuid = getenv("PERF_CPUID");
cpuid 665 tools/perf/util/pmu.c if (cpuid)
cpuid 666 tools/perf/util/pmu.c cpuid = strdup(cpuid);
cpuid 667 tools/perf/util/pmu.c if (!cpuid)
cpuid 668 tools/perf/util/pmu.c cpuid = get_cpuid_str(pmu);
cpuid 669 tools/perf/util/pmu.c if (!cpuid)
cpuid 673 tools/perf/util/pmu.c pr_debug("Using CPUID %s\n", cpuid);
cpuid 676 tools/perf/util/pmu.c return cpuid;
cpuid 682 tools/perf/util/pmu.c char *cpuid = perf_pmu__getcpuid(pmu);
cpuid 688 tools/perf/util/pmu.c if (!cpuid)
cpuid 699 tools/perf/util/pmu.c if (!strcmp_cpuid_str(map->cpuid, cpuid))
cpuid 702 tools/perf/util/pmu.c free(cpuid);
cpuid 1050 tools/perf/util/s390-cpumsf.c static int s390_cpumsf_get_type(const char *cpuid)
cpuid 1054 tools/perf/util/s390-cpumsf.c ret = sscanf(cpuid, "%*[^,],%u", &family);
cpuid 1138 tools/perf/util/s390-cpumsf.c sf->machine_type = s390_cpumsf_get_type(session->evlist->env->cpuid);
cpuid 50 tools/power/cpupower/debug/i386/dump_psb.c u_int32_t cpuid;
cpuid 115 tools/power/cpupower/debug/i386/dump_psb.c if (relevant!= pst->cpuid)
cpuid 121 tools/power/cpupower/debug/i386/dump_psb.c pst->cpuid,
cpuid 313 tools/testing/selftests/kvm/include/x86_64/processor.h struct kvm_cpuid2 *cpuid);
cpuid 689 tools/testing/selftests/kvm/lib/x86_64/processor.c struct kvm_cpuid2 *cpuid;
cpuid 693 tools/testing/selftests/kvm/lib/x86_64/processor.c size = sizeof(*cpuid);
cpuid 695 tools/testing/selftests/kvm/lib/x86_64/processor.c cpuid = malloc(size);
cpuid 696 tools/testing/selftests/kvm/lib/x86_64/processor.c if (!cpuid) {
cpuid 701 tools/testing/selftests/kvm/lib/x86_64/processor.c cpuid->nent = nent;
cpuid 703 tools/testing/selftests/kvm/lib/x86_64/processor.c return cpuid;
cpuid 718 tools/testing/selftests/kvm/lib/x86_64/processor.c static struct kvm_cpuid2 *cpuid;
cpuid 722 tools/testing/selftests/kvm/lib/x86_64/processor.c if (cpuid)
cpuid 723 tools/testing/selftests/kvm/lib/x86_64/processor.c return cpuid;
cpuid 725 tools/testing/selftests/kvm/lib/x86_64/processor.c cpuid = allocate_kvm_cpuid2();
cpuid 730 tools/testing/selftests/kvm/lib/x86_64/processor.c ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
cpuid 735 tools/testing/selftests/kvm/lib/x86_64/processor.c return cpuid;
cpuid 751 tools/testing/selftests/kvm/lib/x86_64/processor.c struct kvm_cpuid2 *cpuid;
cpuid 755 tools/testing/selftests/kvm/lib/x86_64/processor.c cpuid = kvm_get_supported_cpuid();
cpuid 756 tools/testing/selftests/kvm/lib/x86_64/processor.c for (i = 0; i < cpuid->nent; i++) {
cpuid 757 tools/testing/selftests/kvm/lib/x86_64/processor.c if (cpuid->entries[i].function == function &&
cpuid 758 tools/testing/selftests/kvm/lib/x86_64/processor.c cpuid->entries[i].index == index) {
cpuid 759 tools/testing/selftests/kvm/lib/x86_64/processor.c entry = &cpuid->entries[i];
cpuid 783 tools/testing/selftests/kvm/lib/x86_64/processor.c uint32_t vcpuid, struct kvm_cpuid2 *cpuid)
cpuid 790 tools/testing/selftests/kvm/lib/x86_64/processor.c rc = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
cpuid 102 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c static struct kvm_cpuid2 cpuid = {.nent = 0};
cpuid 105 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c ret = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
cpuid 116 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c static struct kvm_cpuid2 *cpuid;
cpuid 118 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c cpuid = malloc(sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));
cpuid 120 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c if (!cpuid) {
cpuid 125 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c cpuid->nent = nent;
cpuid 127 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
cpuid 129 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c return cpuid;
cpuid 397 tools/testing/selftests/net/psock_fanout.c static int set_cpuaffinity(int cpuid)
cpuid 402 tools/testing/selftests/net/psock_fanout.c CPU_SET(cpuid, &mask);
cpuid 405 tools/testing/selftests/net/psock_fanout.c fprintf(stderr, "setaffinity %d\n", cpuid);
cpuid 284 virt/kvm/arm/vgic/vgic-kvm-device.c int cpuid;
cpuid 286 virt/kvm/arm/vgic/vgic-kvm-device.c cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
cpuid 289 virt/kvm/arm/vgic/vgic-kvm-device.c if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
cpuid 292 virt/kvm/arm/vgic/vgic-kvm-device.c reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
cpuid 61 virt/kvm/arm/vgic/vgic-v2.c u32 cpuid, intid = val & GICH_LR_VIRTUALID;
cpuid 65 virt/kvm/arm/vgic/vgic-v2.c cpuid = val & GICH_LR_PHYSID_CPUID;
cpuid 66 virt/kvm/arm/vgic/vgic-v2.c cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
cpuid 67 virt/kvm/arm/vgic/vgic-v2.c cpuid &= 7;
cpuid 82 virt/kvm/arm/vgic/vgic-v2.c irq->active_source = cpuid;
cpuid 90 virt/kvm/arm/vgic/vgic-v2.c irq->source |= (1 << cpuid);
cpuid 44 virt/kvm/arm/vgic/vgic-v3.c u32 intid, cpuid;
cpuid 48 virt/kvm/arm/vgic/vgic-v3.c cpuid = val & GICH_LR_PHYSID_CPUID;
cpuid 49 virt/kvm/arm/vgic/vgic-v3.c cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
cpuid 73 virt/kvm/arm/vgic/vgic-v3.c irq->active_source = cpuid;
cpuid 81 virt/kvm/arm/vgic/vgic-v3.c irq->source |= (1 << cpuid);
cpuid 437 virt/kvm/arm/vgic/vgic.c int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
cpuid 445 virt/kvm/arm/vgic/vgic.c trace_vgic_update_irq_pending(cpuid, intid, level);
cpuid 451 virt/kvm/arm/vgic/vgic.c vcpu = kvm_get_vcpu(kvm, cpuid);