cluster 155 arch/alpha/include/asm/hwrpb.h struct memclust_struct cluster[0];
cluster 265 arch/alpha/kernel/setup.c for ((_cluster) = (memdesc)->cluster, (i) = 0; \
cluster 313 arch/alpha/kernel/setup.c struct memclust_struct * cluster;
cluster 322 arch/alpha/kernel/setup.c for_each_mem_cluster(memdesc, cluster, i) {
cluster 326 arch/alpha/kernel/setup.c i, cluster->usage, cluster->start_pfn,
cluster 327 arch/alpha/kernel/setup.c cluster->start_pfn + cluster->numpages);
cluster 332 arch/alpha/kernel/setup.c if (cluster->usage & 3)
cluster 335 arch/alpha/kernel/setup.c end = cluster->start_pfn + cluster->numpages;
cluster 339 arch/alpha/kernel/setup.c memblock_add(PFN_PHYS(cluster->start_pfn),
cluster 340 arch/alpha/kernel/setup.c cluster->numpages << PAGE_SHIFT);
cluster 400 arch/alpha/kernel/setup.c struct memclust_struct * cluster;
cluster 406 arch/alpha/kernel/setup.c for_each_mem_cluster(memdesc, cluster, i)
cluster 408 arch/alpha/kernel/setup.c if (pfn >= cluster->start_pfn &&
cluster 409 arch/alpha/kernel/setup.c pfn < cluster->start_pfn + cluster->numpages) {
cluster 410 arch/alpha/kernel/setup.c return (cluster->usage & 3) ? 0 : 1;
cluster 34 arch/alpha/mm/numa.c for ((_cluster) = (memdesc)->cluster, (i) = 0; \
cluster 39 arch/alpha/mm/numa.c struct memclust_struct * cluster;
cluster 48 arch/alpha/mm/numa.c for_each_mem_cluster(memdesc, cluster, i) {
cluster 50 arch/alpha/mm/numa.c i, cluster->usage, cluster->start_pfn,
cluster 51 arch/alpha/mm/numa.c cluster->start_pfn + cluster->numpages);
cluster 59 arch/alpha/mm/numa.c struct memclust_struct * cluster;
cluster 79 arch/alpha/mm/numa.c for_each_mem_cluster(memdesc, cluster, i) {
cluster 83 arch/alpha/mm/numa.c if (cluster->usage & 3)
cluster 86 arch/alpha/mm/numa.c start = cluster->start_pfn;
cluster 87 arch/alpha/mm/numa.c end = start + cluster->numpages;
cluster 97 arch/alpha/mm/numa.c i, cluster->usage, cluster->start_pfn,
cluster 98 arch/alpha/mm/numa.c cluster->start_pfn + cluster->numpages);
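The arch/alpha hits above all follow one pattern: walk the firmware's memory cluster descriptors, skip any cluster whose usage field has its low two bits set (reserved for console/PAL use), and feed the rest to memblock. A minimal userspace sketch of that filtering, with a hypothetical memclust struct and page size standing in for the hwrpb.h definitions:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 13  /* 8 KiB pages, an assumption for the sketch */

    /* Hypothetical stand-in for struct memclust_struct from hwrpb.h */
    struct memclust {
        uint64_t start_pfn;
        uint64_t numpages;
        uint64_t usage;    /* low two bits set => reserved by firmware */
    };

    int main(void)
    {
        struct memclust clusters[] = {
            { .start_pfn = 0,   .numpages = 256,  .usage = 1 }, /* reserved */
            { .start_pfn = 256, .numpages = 8192, .usage = 0 }, /* usable */
        };
        size_t i;

        for (i = 0; i < sizeof(clusters) / sizeof(clusters[0]); i++) {
            struct memclust *c = &clusters[i];

            if (c->usage & 3)  /* skip reserved clusters */
                continue;
            /* the kernel would call memblock_add(PFN_PHYS(start_pfn), ...) here */
            printf("memblock_add(%#llx, %#llx)\n",
                   (unsigned long long)(c->start_pfn << PAGE_SHIFT),
                   (unsigned long long)(c->numpages << PAGE_SHIFT));
        }
        return 0;
    }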
cluster 104 arch/arc/plat-eznps/smp.c u32 num:8, cluster:8, core:8, thread:8;
cluster 113 arch/arc/plat-eznps/smp.c ipi.cluster = nps_cluster_logic_to_phys(gid.cluster);
cluster 274 arch/arm/common/bL_switcher.c int cluster;
cluster 289 arch/arm/common/bL_switcher.c cluster = t->wanted_cluster;
cluster 296 arch/arm/common/bL_switcher.c if (cluster != -1) {
cluster 297 arch/arm/common/bL_switcher.c bL_switch_to(cluster);
cluster 423 arch/arm/common/bL_switcher.c unsigned int cpu, cluster, mask;
cluster 430 arch/arm/common/bL_switcher.c cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
cluster 431 arch/arm/common/bL_switcher.c if (cluster >= 2) {
cluster 437 arch/arm/common/bL_switcher.c mask |= (1 << cluster);
cluster 455 arch/arm/common/bL_switcher.c cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
cluster 457 arch/arm/common/bL_switcher.c cluster_0 = cluster;
cluster 458 arch/arm/common/bL_switcher.c if (cluster != cluster_0)
cluster 462 arch/arm/common/bL_switcher.c cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
cluster 469 arch/arm/common/bL_switcher.c if (cluster != cluster_0)
cluster 486 arch/arm/common/bL_switcher.c cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
cluster 495 arch/arm/common/bL_switcher.c bL_gic_id[cpu][cluster] = gic_id;
cluster 497 arch/arm/common/bL_switcher.c cpu, cluster, gic_id);
cluster 500 arch/arm/common/bL_switcher.c bL_switcher_cpu_original_cluster[i] = cluster;
cluster 605 arch/arm/common/bL_switcher.c unsigned int cpu, cluster;
cluster 638 arch/arm/common/bL_switcher.c cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
cluster 639 arch/arm/common/bL_switcher.c if (cluster == bL_switcher_cpu_original_cluster[cpu])
cluster 647 arch/arm/common/bL_switcher.c cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
cluster 648 arch/arm/common/bL_switcher.c if (cluster == bL_switcher_cpu_original_cluster[cpu])
cluster 22 arch/arm/common/bL_switcher_dummy_if.c unsigned int cpu, cluster;
cluster 40 arch/arm/common/bL_switcher_dummy_if.c cluster = val[2] - '0';
cluster 41 arch/arm/common/bL_switcher_dummy_if.c ret = bL_switch_request(cpu, cluster);
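Nearly every ARM hit reads the cluster number as affinity level 1 of the MPIDR. MPIDR_AFFINITY_LEVEL(mpidr, level) is just a shift-and-mask; a self-contained sketch of the 32-bit MPIDR layout (8 bits per affinity level):

    #include <stdio.h>
    #include <stdint.h>

    /* Each affinity level occupies 8 bits of the 32-bit MPIDR. */
    #define MPIDR_LEVEL_BITS 8
    #define MPIDR_LEVEL_MASK ((1u << MPIDR_LEVEL_BITS) - 1)

    /* Equivalent of the kernel's MPIDR_AFFINITY_LEVEL(mpidr, level) */
    static unsigned int mpidr_affinity_level(uint32_t mpidr, unsigned int level)
    {
        return (mpidr >> (level * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK;
    }

    int main(void)
    {
        uint32_t mpidr = 0x00000102;  /* cluster 1, cpu 2 */
        unsigned int cpu = mpidr_affinity_level(mpidr, 0);
        unsigned int cluster = mpidr_affinity_level(mpidr, 1);

        printf("cpu %u cluster %u\n", cpu, cluster);  /* prints: cpu 2 cluster 1 */
        return 0;
    }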
cluster 34 arch/arm/common/mcpm_entry.c static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
cluster 36 arch/arm/common/mcpm_entry.c mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
cluster 37 arch/arm/common/mcpm_entry.c sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
cluster 47 arch/arm/common/mcpm_entry.c static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
cluster 50 arch/arm/common/mcpm_entry.c mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
cluster 51 arch/arm/common/mcpm_entry.c sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
cluster 63 arch/arm/common/mcpm_entry.c static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
cluster 66 arch/arm/common/mcpm_entry.c mcpm_sync.clusters[cluster].cluster = state;
cluster 67 arch/arm/common/mcpm_entry.c sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
cluster 82 arch/arm/common/mcpm_entry.c static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
cluster 85 arch/arm/common/mcpm_entry.c struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
cluster 88 arch/arm/common/mcpm_entry.c c->cluster = CLUSTER_GOING_DOWN;
cluster 89 arch/arm/common/mcpm_entry.c sync_cache_w(&c->cluster);
cluster 131 arch/arm/common/mcpm_entry.c __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
cluster 135 arch/arm/common/mcpm_entry.c static int __mcpm_cluster_state(unsigned int cluster)
cluster 137 arch/arm/common/mcpm_entry.c sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
cluster 138 arch/arm/common/mcpm_entry.c return mcpm_sync.clusters[cluster].cluster;
cluster 143 arch/arm/common/mcpm_entry.c void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
cluster 146 arch/arm/common/mcpm_entry.c mcpm_entry_vectors[cluster][cpu] = val;
cluster 147 arch/arm/common/mcpm_entry.c sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
cluster 152 arch/arm/common/mcpm_entry.c void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
cluster 155 arch/arm/common/mcpm_entry.c unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
cluster 187 arch/arm/common/mcpm_entry.c static inline bool mcpm_cluster_unused(unsigned int cluster)
cluster 191 arch/arm/common/mcpm_entry.c cnt |= mcpm_cpu_use_count[cluster][i];
cluster 195 arch/arm/common/mcpm_entry.c int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
cluster 200 arch/arm/common/mcpm_entry.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 212 arch/arm/common/mcpm_entry.c cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
cluster 213 arch/arm/common/mcpm_entry.c cluster_is_down = mcpm_cluster_unused(cluster);
cluster 215 arch/arm/common/mcpm_entry.c mcpm_cpu_use_count[cluster][cpu]++;
cluster 224 arch/arm/common/mcpm_entry.c BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
cluster 225 arch/arm/common/mcpm_entry.c mcpm_cpu_use_count[cluster][cpu] != 2);
cluster 228 arch/arm/common/mcpm_entry.c ret = platform_ops->cluster_powerup(cluster);
cluster 230 arch/arm/common/mcpm_entry.c ret = platform_ops->cpu_powerup(cpu, cluster);
cluster 241 arch/arm/common/mcpm_entry.c unsigned int mpidr, cpu, cluster;
cluster 247 arch/arm/common/mcpm_entry.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 248 arch/arm/common/mcpm_entry.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 255 arch/arm/common/mcpm_entry.c __mcpm_cpu_going_down(cpu, cluster);
cluster 257 arch/arm/common/mcpm_entry.c BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
cluster 259 arch/arm/common/mcpm_entry.c mcpm_cpu_use_count[cluster][cpu]--;
cluster 260 arch/arm/common/mcpm_entry.c BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
cluster 261 arch/arm/common/mcpm_entry.c mcpm_cpu_use_count[cluster][cpu] != 1);
cluster 262 arch/arm/common/mcpm_entry.c cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
cluster 263 arch/arm/common/mcpm_entry.c last_man = mcpm_cluster_unused(cluster);
cluster 265 arch/arm/common/mcpm_entry.c if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
cluster 266 arch/arm/common/mcpm_entry.c platform_ops->cpu_powerdown_prepare(cpu, cluster);
cluster 267 arch/arm/common/mcpm_entry.c platform_ops->cluster_powerdown_prepare(cluster);
cluster 270 arch/arm/common/mcpm_entry.c __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
cluster 273 arch/arm/common/mcpm_entry.c platform_ops->cpu_powerdown_prepare(cpu, cluster);
cluster 286 arch/arm/common/mcpm_entry.c __mcpm_cpu_down(cpu, cluster);
cluster 308 arch/arm/common/mcpm_entry.c int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
cluster 315 arch/arm/common/mcpm_entry.c ret = platform_ops->wait_for_powerdown(cpu, cluster);
cluster 318 arch/arm/common/mcpm_entry.c __func__, cpu, cluster, ret);
cluster 332 arch/arm/common/mcpm_entry.c unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 334 arch/arm/common/mcpm_entry.c platform_ops->cpu_suspend_prepare(cpu, cluster);
cluster 342 arch/arm/common/mcpm_entry.c unsigned int mpidr, cpu, cluster;
cluster 351 arch/arm/common/mcpm_entry.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 355 arch/arm/common/mcpm_entry.c cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
cluster 356 arch/arm/common/mcpm_entry.c first_man = mcpm_cluster_unused(cluster);
cluster 359 arch/arm/common/mcpm_entry.c platform_ops->cluster_is_up(cluster);
cluster 361 arch/arm/common/mcpm_entry.c mcpm_cpu_use_count[cluster][cpu] = 1;
cluster 363 arch/arm/common/mcpm_entry.c platform_ops->cpu_is_up(cpu, cluster);
cluster 378 arch/arm/common/mcpm_entry.c unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 381 arch/arm/common/mcpm_entry.c mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
cluster 384 arch/arm/common/mcpm_entry.c __mcpm_cpu_going_down(cpu, cluster);
cluster 385 arch/arm/common/mcpm_entry.c BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
cluster 387 arch/arm/common/mcpm_entry.c __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
cluster 388 arch/arm/common/mcpm_entry.c __mcpm_cpu_down(cpu, cluster);
cluster 436 arch/arm/common/mcpm_entry.c mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
cluster 447 arch/arm/common/mcpm_entry.c mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
cluster 44 arch/arm/include/asm/mcpm.h void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
cluster 51 arch/arm/include/asm/mcpm.h void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
cluster 84 arch/arm/include/asm/mcpm.h int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
cluster 132 arch/arm/include/asm/mcpm.h int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);
cluster 219 arch/arm/include/asm/mcpm.h int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
cluster 220 arch/arm/include/asm/mcpm.h int (*cluster_powerup)(unsigned int cluster);
cluster 221 arch/arm/include/asm/mcpm.h void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
cluster 222 arch/arm/include/asm/mcpm.h void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
cluster 223 arch/arm/include/asm/mcpm.h void (*cluster_powerdown_prepare)(unsigned int cluster);
cluster 226 arch/arm/include/asm/mcpm.h void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
cluster 227 arch/arm/include/asm/mcpm.h void (*cluster_is_up)(unsigned int cluster);
cluster 228 arch/arm/include/asm/mcpm.h int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
cluster 291 arch/arm/include/asm/mcpm.h s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);
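The mcpm_entry.c hits trace MCPM's reference-counted power model: mcpm_cpu_use_count tracks per-CPU references, and mcpm_cluster_unused() decides the first-man transition (power the cluster before the CPU) and the last-man transition (take the cluster down after the last CPU). A toy model of just that counting logic, with locking and the CLUSTER_GOING_DOWN cache-coherency handshake deliberately left out:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_CLUSTERS         2
    #define MAX_CPUS_PER_CLUSTER 4

    static int use_count[MAX_CLUSTERS][MAX_CPUS_PER_CLUSTER];

    /* true if no CPU in the cluster holds a reference (cf. mcpm_cluster_unused) */
    static bool cluster_unused(unsigned int cluster)
    {
        int i, cnt = 0;

        for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++)
            cnt |= use_count[cluster][i];
        return !cnt;
    }

    static void cpu_power_up(unsigned int cpu, unsigned int cluster)
    {
        bool first_man = cluster_unused(cluster);

        use_count[cluster][cpu]++;
        if (first_man)  /* cluster rails and coherency come up first */
            printf("cluster %u: power on\n", cluster);
        printf("cluster %u cpu %u: power on\n", cluster, cpu);
    }

    static void cpu_power_down(unsigned int cpu, unsigned int cluster)
    {
        use_count[cluster][cpu]--;
        printf("cluster %u cpu %u: power off\n", cluster, cpu);
        if (cluster_unused(cluster))  /* last man takes the cluster down */
            printf("cluster %u: power off\n", cluster);
    }

    int main(void)
    {
        cpu_power_up(0, 0);
        cpu_power_up(1, 0);
        cpu_power_down(0, 0);
        cpu_power_down(1, 0);  /* last man: cluster powers off here */
        return 0;
    }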
cluster 132 arch/arm/mach-exynos/common.h extern void exynos_cluster_power_down(int cluster);
cluster 133 arch/arm/mach-exynos/common.h extern void exynos_cluster_power_up(int cluster);
cluster 134 arch/arm/mach-exynos/common.h extern int exynos_cluster_power_state(int cluster);
cluster 58 arch/arm/mach-exynos/mcpm-exynos.c static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
cluster 60 arch/arm/mach-exynos/mcpm-exynos.c unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
cluster 62 arch/arm/mach-exynos/mcpm-exynos.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 64 arch/arm/mach-exynos/mcpm-exynos.c cluster >= EXYNOS5420_NR_CLUSTERS)
cluster 76 arch/arm/mach-exynos/mcpm-exynos.c if (cluster &&
cluster 77 arch/arm/mach-exynos/mcpm-exynos.c cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
cluster 93 arch/arm/mach-exynos/mcpm-exynos.c cpu, cluster);
cluster 106 arch/arm/mach-exynos/mcpm-exynos.c static int exynos_cluster_powerup(unsigned int cluster)
cluster 108 arch/arm/mach-exynos/mcpm-exynos.c pr_debug("%s: cluster %u\n", __func__, cluster);
cluster 109 arch/arm/mach-exynos/mcpm-exynos.c if (cluster >= EXYNOS5420_NR_CLUSTERS)
cluster 112 arch/arm/mach-exynos/mcpm-exynos.c exynos_cluster_power_up(cluster);
cluster 116 arch/arm/mach-exynos/mcpm-exynos.c static void exynos_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
cluster 118 arch/arm/mach-exynos/mcpm-exynos.c unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
cluster 120 arch/arm/mach-exynos/mcpm-exynos.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 122 arch/arm/mach-exynos/mcpm-exynos.c cluster >= EXYNOS5420_NR_CLUSTERS);
cluster 126 arch/arm/mach-exynos/mcpm-exynos.c static void exynos_cluster_powerdown_prepare(unsigned int cluster)
cluster 128 arch/arm/mach-exynos/mcpm-exynos.c pr_debug("%s: cluster %u\n", __func__, cluster);
cluster 129 arch/arm/mach-exynos/mcpm-exynos.c BUG_ON(cluster >= EXYNOS5420_NR_CLUSTERS);
cluster 130 arch/arm/mach-exynos/mcpm-exynos.c exynos_cluster_power_down(cluster);
cluster 163 arch/arm/mach-exynos/mcpm-exynos.c static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
cluster 166 arch/arm/mach-exynos/mcpm-exynos.c unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
cluster 168 arch/arm/mach-exynos/mcpm-exynos.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 170 arch/arm/mach-exynos/mcpm-exynos.c cluster >= EXYNOS5420_NR_CLUSTERS);
cluster 184 arch/arm/mach-exynos/mcpm-exynos.c static void exynos_cpu_is_up(unsigned int cpu, unsigned int cluster)
cluster 187 arch/arm/mach-exynos/mcpm-exynos.c exynos_cpu_powerup(cpu, cluster);
cluster 143 arch/arm/mach-exynos/platsmp.c void exynos_cluster_power_down(int cluster)
cluster 145 arch/arm/mach-exynos/platsmp.c pmu_raw_writel(0, EXYNOS_COMMON_CONFIGURATION(cluster));
cluster 152 arch/arm/mach-exynos/platsmp.c void exynos_cluster_power_up(int cluster)
cluster 155 arch/arm/mach-exynos/platsmp.c EXYNOS_COMMON_CONFIGURATION(cluster));
cluster 163 arch/arm/mach-exynos/platsmp.c int exynos_cluster_power_state(int cluster)
cluster 165 arch/arm/mach-exynos/platsmp.c return (pmu_raw_readl(EXYNOS_COMMON_STATUS(cluster)) &
cluster 268 arch/arm/mach-exynos/suspend.c unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 272 arch/arm/mach-exynos/suspend.c mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);
cluster 453 arch/arm/mach-exynos/suspend.c unsigned int mpidr, cluster;
cluster 456 arch/arm/mach-exynos/suspend.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 461 arch/arm/mach-exynos/suspend.c if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {
cluster 71 arch/arm/mach-hisi/platmcpm.c static bool hip04_cluster_is_down(unsigned int cluster)
cluster 76 arch/arm/mach-hisi/platmcpm.c if (hip04_cpu_table[cluster][i])
cluster 81 arch/arm/mach-hisi/platmcpm.c static void hip04_set_snoop_filter(unsigned int cluster, unsigned int on)
cluster 89 arch/arm/mach-hisi/platmcpm.c data |= 1 << cluster;
cluster 91 arch/arm/mach-hisi/platmcpm.c data &= ~(1 << cluster);
cluster 100 arch/arm/mach-hisi/platmcpm.c unsigned int mpidr, cpu, cluster;
cluster 106 arch/arm/mach-hisi/platmcpm.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 110 arch/arm/mach-hisi/platmcpm.c if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
cluster 115 arch/arm/mach-hisi/platmcpm.c if (hip04_cpu_table[cluster][cpu])
cluster 118 arch/arm/mach-hisi/platmcpm.c sys_dreq = sysctrl + SC_CPU_RESET_DREQ(cluster);
cluster 119 arch/arm/mach-hisi/platmcpm.c sys_status = sysctrl + SC_CPU_RESET_STATUS(cluster);
cluster 120 arch/arm/mach-hisi/platmcpm.c if (hip04_cluster_is_down(cluster)) {
cluster 127 arch/arm/mach-hisi/platmcpm.c hip04_set_snoop_filter(cluster, 1);
cluster 146 arch/arm/mach-hisi/platmcpm.c hip04_cpu_table[cluster][cpu]++;
cluster 155 arch/arm/mach-hisi/platmcpm.c unsigned int mpidr, cpu, cluster;
cluster 160 arch/arm/mach-hisi/platmcpm.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 163 arch/arm/mach-hisi/platmcpm.c hip04_cpu_table[cluster][cpu]--;
cluster 164 arch/arm/mach-hisi/platmcpm.c if (hip04_cpu_table[cluster][cpu] == 1) {
cluster 168 arch/arm/mach-hisi/platmcpm.c } else if (hip04_cpu_table[cluster][cpu] > 1) {
cluster 169 arch/arm/mach-hisi/platmcpm.c pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
cluster 173 arch/arm/mach-hisi/platmcpm.c last_man = hip04_cluster_is_down(cluster);
cluster 193 arch/arm/mach-hisi/platmcpm.c unsigned int mpidr, cpu, cluster;
cluster 198 arch/arm/mach-hisi/platmcpm.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 199 arch/arm/mach-hisi/platmcpm.c BUG_ON(cluster >= HIP04_MAX_CLUSTERS ||
cluster 205 arch/arm/mach-hisi/platmcpm.c if (hip04_cpu_table[cluster][cpu])
cluster 208 arch/arm/mach-hisi/platmcpm.c data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
cluster 220 arch/arm/mach-hisi/platmcpm.c writel_relaxed(data, sysctrl + SC_CPU_RESET_REQ(cluster));
cluster 223 arch/arm/mach-hisi/platmcpm.c data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
cluster 229 arch/arm/mach-hisi/platmcpm.c if (hip04_cluster_is_down(cluster))
cluster 230 arch/arm/mach-hisi/platmcpm.c hip04_set_snoop_filter(cluster, 0);
cluster 249 arch/arm/mach-hisi/platmcpm.c unsigned int mpidr, cpu, cluster;
cluster 253 arch/arm/mach-hisi/platmcpm.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 255 arch/arm/mach-hisi/platmcpm.c if (cluster >= HIP04_MAX_CLUSTERS ||
cluster 260 arch/arm/mach-hisi/platmcpm.c hip04_set_snoop_filter(cluster, 1);
cluster 261 arch/arm/mach-hisi/platmcpm.c hip04_cpu_table[cluster][cpu] = 1;
cluster 25 arch/arm/mach-milbeaut/platsmp.c unsigned int mpidr, cpu, cluster;
cluster 32 arch/arm/mach-milbeaut/platsmp.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 38 arch/arm/mach-milbeaut/platsmp.c __func__, cpu, l_cpu, cluster);
cluster 48 arch/arm/mach-milbeaut/platsmp.c unsigned int mpidr, cpu, cluster;
cluster 61 arch/arm/mach-milbeaut/platsmp.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 62 arch/arm/mach-milbeaut/platsmp.c pr_info("MCPM boot on cpu_%u cluster_%u\n", cpu, cluster);
cluster 88 arch/arm/mach-sunxi/mc_smp.c static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster)
cluster 91 arch/arm/mach-sunxi/mc_smp.c int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core;
cluster 106 arch/arm/mach-sunxi/mc_smp.c __func__, cluster, core);
cluster 116 arch/arm/mach-sunxi/mc_smp.c static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,
cluster 122 arch/arm/mach-sunxi/mc_smp.c reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cluster 126 arch/arm/mach-sunxi/mc_smp.c cluster, cpu);
cluster 130 arch/arm/mach-sunxi/mc_smp.c writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cluster 132 arch/arm/mach-sunxi/mc_smp.c writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cluster 134 arch/arm/mach-sunxi/mc_smp.c writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cluster 136 arch/arm/mach-sunxi/mc_smp.c writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cluster 138 arch/arm/mach-sunxi/mc_smp.c writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cluster 141 arch/arm/mach-sunxi/mc_smp.c writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
cluster 159 arch/arm/mach-sunxi/mc_smp.c static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
cluster 163 arch/arm/mach-sunxi/mc_smp.c pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
cluster 164 arch/arm/mach-sunxi/mc_smp.c if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
cluster 168 arch/arm/mach-sunxi/mc_smp.c if (cluster == 0 && cpu == 0)
cluster 172 arch/arm/mach-sunxi/mc_smp.c reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
cluster 174 arch/arm/mach-sunxi/mc_smp.c writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
cluster 179 arch/arm/mach-sunxi/mc_smp.c R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
cluster 182 arch/arm/mach-sunxi/mc_smp.c R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
cluster 187 arch/arm/mach-sunxi/mc_smp.c if (!sunxi_core_is_cortex_a15(cpu, cluster)) {
cluster 188 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
cluster 190 arch/arm/mach-sunxi/mc_smp.c writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
cluster 194 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 201 arch/arm/mach-sunxi/mc_smp.c if (!sunxi_core_is_cortex_a15(cpu, cluster))
cluster 204 arch/arm/mach-sunxi/mc_smp.c writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 207 arch/arm/mach-sunxi/mc_smp.c sunxi_cpu_power_switch_set(cpu, cluster, true);
cluster 216 arch/arm/mach-sunxi/mc_smp.c reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
cluster 218 arch/arm/mach-sunxi/mc_smp.c writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
cluster 228 arch/arm/mach-sunxi/mc_smp.c reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
cluster 230 arch/arm/mach-sunxi/mc_smp.c writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
cluster 234 arch/arm/mach-sunxi/mc_smp.c R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
cluster 237 arch/arm/mach-sunxi/mc_smp.c R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
cluster 242 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 245 arch/arm/mach-sunxi/mc_smp.c if (!sunxi_core_is_cortex_a15(cpu, cluster))
cluster 249 arch/arm/mach-sunxi/mc_smp.c writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 254 arch/arm/mach-sunxi/mc_smp.c static int sunxi_cluster_powerup(unsigned int cluster)
cluster 258 arch/arm/mach-sunxi/mc_smp.c pr_debug("%s: cluster %u\n", __func__, cluster);
cluster 259 arch/arm/mach-sunxi/mc_smp.c if (cluster >= SUNXI_NR_CLUSTERS)
cluster 264 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 266 arch/arm/mach-sunxi/mc_smp.c writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 271 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
cluster 273 arch/arm/mach-sunxi/mc_smp.c writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
cluster 276 arch/arm/mach-sunxi/mc_smp.c reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
cluster 278 arch/arm/mach-sunxi/mc_smp.c writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
cluster 283 arch/arm/mach-sunxi/mc_smp.c R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
cluster 286 arch/arm/mach-sunxi/mc_smp.c R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
cluster 291 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 301 arch/arm/mach-sunxi/mc_smp.c if (!sunxi_core_is_cortex_a15(0, cluster))
cluster 304 arch/arm/mach-sunxi/mc_smp.c writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 307 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
cluster 308 arch/arm/mach-sunxi/mc_smp.c if (sunxi_core_is_cortex_a15(0, cluster)) {
cluster 316 arch/arm/mach-sunxi/mc_smp.c writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
cluster 319 arch/arm/mach-sunxi/mc_smp.c reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
cluster 324 arch/arm/mach-sunxi/mc_smp.c writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
cluster 328 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 332 arch/arm/mach-sunxi/mc_smp.c writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 335 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
cluster 337 arch/arm/mach-sunxi/mc_smp.c writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
cluster 375 arch/arm/mach-sunxi/mc_smp.c static bool sunxi_mc_smp_cluster_is_down(unsigned int cluster)
cluster 380 arch/arm/mach-sunxi/mc_smp.c if (sunxi_mc_smp_cpu_table[cluster][i])
cluster 394 arch/arm/mach-sunxi/mc_smp.c unsigned int mpidr, cpu, cluster;
cluster 398 arch/arm/mach-sunxi/mc_smp.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 402 arch/arm/mach-sunxi/mc_smp.c if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER)
cluster 407 arch/arm/mach-sunxi/mc_smp.c if (sunxi_mc_smp_cpu_table[cluster][cpu])
cluster 410 arch/arm/mach-sunxi/mc_smp.c if (sunxi_mc_smp_cluster_is_down(cluster)) {
cluster 412 arch/arm/mach-sunxi/mc_smp.c sunxi_cluster_powerup(cluster);
cluster 419 arch/arm/mach-sunxi/mc_smp.c sunxi_cpu_powerup(cpu, cluster);
cluster 422 arch/arm/mach-sunxi/mc_smp.c sunxi_mc_smp_cpu_table[cluster][cpu]++;
cluster 431 arch/arm/mach-sunxi/mc_smp.c unsigned int cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
cluster 434 arch/arm/mach-sunxi/mc_smp.c pr_debug("%s: cluster %u\n", __func__, cluster);
cluster 439 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
cluster 441 arch/arm/mach-sunxi/mc_smp.c writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
cluster 446 arch/arm/mach-sunxi/mc_smp.c unsigned int mpidr, cpu, cluster;
cluster 451 arch/arm/mach-sunxi/mc_smp.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 452 arch/arm/mach-sunxi/mc_smp.c pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
cluster 455 arch/arm/mach-sunxi/mc_smp.c sunxi_mc_smp_cpu_table[cluster][cpu]--;
cluster 456 arch/arm/mach-sunxi/mc_smp.c if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) {
cluster 462 arch/arm/mach-sunxi/mc_smp.c } else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) {
cluster 464 arch/arm/mach-sunxi/mc_smp.c cluster, cpu);
cluster 468 arch/arm/mach-sunxi/mc_smp.c last_man = sunxi_mc_smp_cluster_is_down(cluster);
cluster 481 arch/arm/mach-sunxi/mc_smp.c static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
cluster 486 arch/arm/mach-sunxi/mc_smp.c pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
cluster 487 arch/arm/mach-sunxi/mc_smp.c if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
cluster 494 arch/arm/mach-sunxi/mc_smp.c reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
cluster 496 arch/arm/mach-sunxi/mc_smp.c writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
cluster 500 arch/arm/mach-sunxi/mc_smp.c sunxi_cpu_power_switch_set(cpu, cluster, false);
cluster 505 arch/arm/mach-sunxi/mc_smp.c static int sunxi_cluster_powerdown(unsigned int cluster)
cluster 509 arch/arm/mach-sunxi/mc_smp.c pr_debug("%s: cluster %u\n", __func__, cluster);
cluster 510 arch/arm/mach-sunxi/mc_smp.c if (cluster >= SUNXI_NR_CLUSTERS)
cluster 515 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 519 arch/arm/mach-sunxi/mc_smp.c writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
cluster 523 arch/arm/mach-sunxi/mc_smp.c reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
cluster 528 arch/arm/mach-sunxi/mc_smp.c writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
cluster 536 arch/arm/mach-sunxi/mc_smp.c unsigned int mpidr, cpu, cluster;
cluster 543 arch/arm/mach-sunxi/mc_smp.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 546 arch/arm/mach-sunxi/mc_smp.c if (WARN_ON(cluster >= SUNXI_NR_CLUSTERS ||
cluster 565 arch/arm/mach-sunxi/mc_smp.c if (sunxi_mc_smp_cpu_table[cluster][cpu])
cluster 568 arch/arm/mach-sunxi/mc_smp.c reg = readl(cpucfg_base + CPUCFG_CX_STATUS(cluster));
cluster 579 arch/arm/mach-sunxi/mc_smp.c sunxi_cpu_powerdown(cpu, cluster);
cluster 581 arch/arm/mach-sunxi/mc_smp.c if (!sunxi_mc_smp_cluster_is_down(cluster))
cluster 585 arch/arm/mach-sunxi/mc_smp.c ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg,
cluster 600 arch/arm/mach-sunxi/mc_smp.c sunxi_cluster_powerdown(cluster);
cluster 605 arch/arm/mach-sunxi/mc_smp.c __func__, cluster, cpu, ret);
cluster 631 arch/arm/mach-sunxi/mc_smp.c unsigned int mpidr, cpu, cluster;
cluster 635 arch/arm/mach-sunxi/mc_smp.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 637 arch/arm/mach-sunxi/mc_smp.c if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) {
cluster 641 arch/arm/mach-sunxi/mc_smp.c sunxi_mc_smp_cpu_table[cluster][cpu] = 1;
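The sunxi_cpu_power_switch_set() hits show the per-CPU power switch being closed in stages (0xfe, 0xf8, 0xf0, then 0x00) rather than all at once, presumably to limit inrush current as a core's rail comes up; writing 0xff opens the switch again. A sketch of that sequencing with a stubbed register write standing in for the kernel's writel() on PRCM_PWR_SWITCH_REG (the real code also delays between steps and skips the ramp if the switch is already closed):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    /* Stub for the MMIO write; the register name here is purely symbolic. */
    static void writel_stub(uint32_t val, const char *reg)
    {
        printf("write %#04x -> %s\n", val, reg);
    }

    /* Close the power switch gradually; 0x00 = fully on, 0xff = off. */
    static void cpu_power_switch_set(bool enable)
    {
        static const uint32_t on_seq[] = { 0xfe, 0xf8, 0xf0, 0x00 };
        size_t i;

        if (enable) {
            for (i = 0; i < sizeof(on_seq) / sizeof(on_seq[0]); i++)
                writel_stub(on_seq[i], "PRCM_PWR_SWITCH_REG");
        } else {
            writel_stub(0xff, "PRCM_PWR_SWITCH_REG");
        }
    }

    int main(void)
    {
        cpu_power_switch_set(true);   /* staged power-on */
        cpu_power_switch_set(false);  /* single-step power-off */
        return 0;
    }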
cluster 38 arch/arm/mach-vexpress/dcscb.c static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster)
cluster 42 arch/arm/mach-vexpress/dcscb.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 43 arch/arm/mach-vexpress/dcscb.c if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))
cluster 46 arch/arm/mach-vexpress/dcscb.c rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
cluster 48 arch/arm/mach-vexpress/dcscb.c writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
cluster 52 arch/arm/mach-vexpress/dcscb.c static int dcscb_cluster_powerup(unsigned int cluster)
cluster 56 arch/arm/mach-vexpress/dcscb.c pr_debug("%s: cluster %u\n", __func__, cluster);
cluster 57 arch/arm/mach-vexpress/dcscb.c if (cluster >= 2)
cluster 61 arch/arm/mach-vexpress/dcscb.c rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
cluster 63 arch/arm/mach-vexpress/dcscb.c rst_hold |= dcscb_allcpus_mask[cluster];
cluster 64 arch/arm/mach-vexpress/dcscb.c writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
cluster 68 arch/arm/mach-vexpress/dcscb.c static void dcscb_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
cluster 72 arch/arm/mach-vexpress/dcscb.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 73 arch/arm/mach-vexpress/dcscb.c BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));
cluster 75 arch/arm/mach-vexpress/dcscb.c rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
cluster 77 arch/arm/mach-vexpress/dcscb.c writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
cluster 80 arch/arm/mach-vexpress/dcscb.c static void dcscb_cluster_powerdown_prepare(unsigned int cluster)
cluster 84 arch/arm/mach-vexpress/dcscb.c pr_debug("%s: cluster %u\n", __func__, cluster);
cluster 85 arch/arm/mach-vexpress/dcscb.c BUG_ON(cluster >= 2);
cluster 87 arch/arm/mach-vexpress/dcscb.c rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
cluster 89 arch/arm/mach-vexpress/dcscb.c writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
cluster 119 arch/arm/mach-vexpress/spc.c static inline bool cluster_is_a15(u32 cluster)
cluster 121 arch/arm/mach-vexpress/spc.c return cluster == info->a15_clusid;
cluster 158 arch/arm/mach-vexpress/spc.c void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
cluster 162 arch/arm/mach-vexpress/spc.c if (cluster >= MAX_CLUSTERS)
cluster 167 arch/arm/mach-vexpress/spc.c if (!cluster_is_a15(cluster))
cluster 187 arch/arm/mach-vexpress/spc.c void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
cluster 191 arch/arm/mach-vexpress/spc.c if (cluster >= MAX_CLUSTERS)
cluster 194 arch/arm/mach-vexpress/spc.c if (cluster_is_a15(cluster))
cluster 212 arch/arm/mach-vexpress/spc.c void ve_spc_powerdown(u32 cluster, bool enable)
cluster 216 arch/arm/mach-vexpress/spc.c if (cluster >= MAX_CLUSTERS)
cluster 219 arch/arm/mach-vexpress/spc.c pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN;
cluster 223 arch/arm/mach-vexpress/spc.c static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
cluster 225 arch/arm/mach-vexpress/spc.c return cluster_is_a15(cluster) ?
cluster 242 arch/arm/mach-vexpress/spc.c int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
cluster 245 arch/arm/mach-vexpress/spc.c u32 mask = standbywfi_cpu_mask(cpu, cluster);
cluster 247 arch/arm/mach-vexpress/spc.c if (cluster >= MAX_CLUSTERS)
cluster 258 arch/arm/mach-vexpress/spc.c static int ve_spc_get_performance(int cluster, u32 *freq)
cluster 260 arch/arm/mach-vexpress/spc.c struct ve_spc_opp *opps = info->opps[cluster];
cluster 264 arch/arm/mach-vexpress/spc.c perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;
cluster 267 arch/arm/mach-vexpress/spc.c if (perf >= info->num_opps[cluster])
cluster 277 arch/arm/mach-vexpress/spc.c static int ve_spc_round_performance(int cluster, u32 freq)
cluster 279 arch/arm/mach-vexpress/spc.c int idx, max_opp = info->num_opps[cluster];
cluster 280 arch/arm/mach-vexpress/spc.c struct ve_spc_opp *opps = info->opps[cluster];
cluster 300 arch/arm/mach-vexpress/spc.c static int ve_spc_find_performance_index(int cluster, u32 freq)
cluster 302 arch/arm/mach-vexpress/spc.c int idx, max_opp = info->num_opps[cluster];
cluster 303 arch/arm/mach-vexpress/spc.c struct ve_spc_opp *opps = info->opps[cluster];
cluster 322 arch/arm/mach-vexpress/spc.c static int ve_spc_set_performance(int cluster, u32 freq)
cluster 327 arch/arm/mach-vexpress/spc.c if (cluster_is_a15(cluster)) {
cluster 335 arch/arm/mach-vexpress/spc.c perf = ve_spc_find_performance_index(cluster, freq);
cluster 401 arch/arm/mach-vexpress/spc.c static int ve_spc_populate_opps(uint32_t cluster)
cluster 410 arch/arm/mach-vexpress/spc.c info->opps[cluster] = opps;
cluster 412 arch/arm/mach-vexpress/spc.c off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
cluster 422 arch/arm/mach-vexpress/spc.c info->num_opps[cluster] = idx;
cluster 429 arch/arm/mach-vexpress/spc.c int cluster;
cluster 433 arch/arm/mach-vexpress/spc.c cluster = topology_physical_package_id(cpu_dev->id);
cluster 434 arch/arm/mach-vexpress/spc.c cluster = cluster < 0 ? 0 : cluster;
cluster 436 arch/arm/mach-vexpress/spc.c max_opp = info->num_opps[cluster];
cluster 437 arch/arm/mach-vexpress/spc.c opps = info->opps[cluster];
cluster 492 arch/arm/mach-vexpress/spc.c int cluster;
cluster 502 arch/arm/mach-vexpress/spc.c if (ve_spc_get_performance(spc->cluster, &freq))
cluster 513 arch/arm/mach-vexpress/spc.c return ve_spc_round_performance(spc->cluster, drate);
cluster 521 arch/arm/mach-vexpress/spc.c return ve_spc_set_performance(spc->cluster, rate / 1000);
cluster 540 arch/arm/mach-vexpress/spc.c spc->cluster = topology_physical_package_id(cpu_dev->id);
cluster 542 arch/arm/mach-vexpress/spc.c spc->cluster = spc->cluster < 0 ? 0 : spc->cluster;
cluster 554 arch/arm/mach-vexpress/spc.c int cpu, cluster;
cluster 582 arch/arm/mach-vexpress/spc.c cluster = topology_physical_package_id(cpu_dev->id);
cluster 583 arch/arm/mach-vexpress/spc.c if (init_opp_table[cluster])
cluster 592 arch/arm/mach-vexpress/spc.c init_opp_table[cluster] = true;
cluster 13 arch/arm/mach-vexpress/spc.h void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set);
cluster 14 arch/arm/mach-vexpress/spc.h void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr);
cluster 15 arch/arm/mach-vexpress/spc.h void ve_spc_powerdown(u32 cluster, bool enable);
cluster 16 arch/arm/mach-vexpress/spc.h int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster);
cluster 48 arch/arm/mach-vexpress/tc2_pm.c static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
cluster 50 arch/arm/mach-vexpress/tc2_pm.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 51 arch/arm/mach-vexpress/tc2_pm.c if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
cluster 53 arch/arm/mach-vexpress/tc2_pm.c ve_spc_set_resume_addr(cluster, cpu,
cluster 55 arch/arm/mach-vexpress/tc2_pm.c ve_spc_cpu_wakeup_irq(cluster, cpu, true);
cluster 59 arch/arm/mach-vexpress/tc2_pm.c static int tc2_pm_cluster_powerup(unsigned int cluster)
cluster 61 arch/arm/mach-vexpress/tc2_pm.c pr_debug("%s: cluster %u\n", __func__, cluster);
cluster 62 arch/arm/mach-vexpress/tc2_pm.c if (cluster >= TC2_CLUSTERS)
cluster 64 arch/arm/mach-vexpress/tc2_pm.c ve_spc_powerdown(cluster, false);
cluster 68 arch/arm/mach-vexpress/tc2_pm.c static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
cluster 70 arch/arm/mach-vexpress/tc2_pm.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 71 arch/arm/mach-vexpress/tc2_pm.c BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
cluster 72 arch/arm/mach-vexpress/tc2_pm.c ve_spc_cpu_wakeup_irq(cluster, cpu, true);
cluster 83 arch/arm/mach-vexpress/tc2_pm.c static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
cluster 85 arch/arm/mach-vexpress/tc2_pm.c pr_debug("%s: cluster %u\n", __func__, cluster);
cluster 86 arch/arm/mach-vexpress/tc2_pm.c BUG_ON(cluster >= TC2_CLUSTERS);
cluster 87 arch/arm/mach-vexpress/tc2_pm.c ve_spc_powerdown(cluster, true);
cluster 114 arch/arm/mach-vexpress/tc2_pm.c static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
cluster 116 arch/arm/mach-vexpress/tc2_pm.c u32 mask = cluster ?
cluster 126 arch/arm/mach-vexpress/tc2_pm.c static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
cluster 130 arch/arm/mach-vexpress/tc2_pm.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 131 arch/arm/mach-vexpress/tc2_pm.c BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
cluster 135 arch/arm/mach-vexpress/tc2_pm.c __func__, cpu, cluster,
cluster 146 arch/arm/mach-vexpress/tc2_pm.c if (tc2_core_in_reset(cpu, cluster) ||
cluster 147 arch/arm/mach-vexpress/tc2_pm.c ve_spc_cpu_in_wfi(cpu, cluster))
cluster 157 arch/arm/mach-vexpress/tc2_pm.c static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
cluster 159 arch/arm/mach-vexpress/tc2_pm.c ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
cluster 162 arch/arm/mach-vexpress/tc2_pm.c static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
cluster 164 arch/arm/mach-vexpress/tc2_pm.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 165 arch/arm/mach-vexpress/tc2_pm.c BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
cluster 166 arch/arm/mach-vexpress/tc2_pm.c ve_spc_cpu_wakeup_irq(cluster, cpu, false);
cluster 167 arch/arm/mach-vexpress/tc2_pm.c ve_spc_set_resume_addr(cluster, cpu, 0);
cluster 170 arch/arm/mach-vexpress/tc2_pm.c static void tc2_pm_cluster_is_up(unsigned int cluster)
cluster 172 arch/arm/mach-vexpress/tc2_pm.c pr_debug("%s: cluster %u\n", __func__, cluster);
cluster 173 arch/arm/mach-vexpress/tc2_pm.c BUG_ON(cluster >= TC2_CLUSTERS);
cluster 174 arch/arm/mach-vexpress/tc2_pm.c ve_spc_powerdown(cluster, false);
cluster 204 arch/arm/mach-vexpress/tc2_pm.c unsigned int mpidr, cpu, cluster;
cluster 246 arch/arm/mach-vexpress/tc2_pm.c cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 247 arch/arm/mach-vexpress/tc2_pm.c pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
cluster 248 arch/arm/mach-vexpress/tc2_pm.c if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
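tc2_pm_wait_for_powerdown() polls two conditions, the core held in reset (tc2_core_in_reset) or the core sitting in WFI (ve_spc_cpu_in_wfi), and gives up after a bounded number of tries. A sketch of that poll loop with hypothetical stubbed hardware checks in place of the SPC register reads:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stubs standing in for the two hardware status reads. */
    static bool core_in_reset(unsigned int cpu, unsigned int cluster)
    {
        static int countdown = 3;  /* pretend reset asserts on the 3rd poll */
        (void)cpu; (void)cluster;
        return --countdown <= 0;
    }

    static bool cpu_in_wfi(unsigned int cpu, unsigned int cluster)
    {
        (void)cpu; (void)cluster;
        return false;
    }

    /* Poll until the core is in reset or in WFI, with a bounded retry budget. */
    static int wait_for_powerdown(unsigned int cpu, unsigned int cluster)
    {
        unsigned int tries;

        for (tries = 0; tries < 100; tries++) {
            if (core_in_reset(cpu, cluster) || cpu_in_wfi(cpu, cluster))
                return 0;   /* powered down far enough */
        }
        return -1;          /* timed out; caller treats the CPU as stuck */
    }

    int main(void)
    {
        printf("wait_for_powerdown: %d\n", wait_for_powerdown(0, 1));
        return 0;
    }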
cluster 168 arch/mips/include/asm/cpu-info.h extern void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster);
cluster 424 arch/mips/include/asm/mips-cm.h extern void mips_cm_lock_other(unsigned int cluster, unsigned int core,
cluster 437 arch/mips/include/asm/mips-cm.h static inline void mips_cm_lock_other(unsigned int cluster, unsigned int core,
cluster 133 arch/mips/include/asm/mips-cps.h static inline uint64_t mips_cps_cluster_config(unsigned int cluster)
cluster 143 arch/mips/include/asm/mips-cps.h WARN_ON(cluster != 0);
cluster 151 arch/mips/include/asm/mips-cps.h mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
cluster 166 arch/mips/include/asm/mips-cps.h static inline unsigned int mips_cps_numcores(unsigned int cluster)
cluster 172 arch/mips/include/asm/mips-cps.h return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
cluster 182 arch/mips/include/asm/mips-cps.h static inline unsigned int mips_cps_numiocu(unsigned int cluster)
cluster 189 arch/mips/include/asm/mips-cps.h num_iocu = mips_cps_cluster_config(cluster) & CM_GCR_CONFIG_NUMIOCU;
cluster 203 arch/mips/include/asm/mips-cps.h static inline unsigned int mips_cps_numvps(unsigned int cluster, unsigned int core)
cluster 214 arch/mips/include/asm/mips-cps.h mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
cluster 65 arch/mips/kernel/cacheinfo.c int cluster = cpu_cluster(&cpu_data[cpu]);
cluster 68 arch/mips/kernel/cacheinfo.c if (cpu_cluster(&cpu_data[cpu1]) == cluster)
cluster 2239 arch/mips/kernel/cpu-probe.c void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster)
cluster 2242 arch/mips/kernel/cpu-probe.c WARN_ON(cluster > (MIPS_GLOBALNUMBER_CLUSTER >>
cluster 2246 arch/mips/kernel/cpu-probe.c cpuinfo->globalnumber |= cluster << MIPS_GLOBALNUMBER_CLUSTER_SHF;
cluster 256 arch/mips/kernel/mips-cm.c void mips_cm_lock_other(unsigned int cluster, unsigned int core,
cluster 271 arch/mips/kernel/mips-cm.c val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER);
cluster 274 arch/mips/kernel/mips-cm.c WARN_ON(cluster != 0);
cluster 290 arch/mips/kernel/mips-cm.c WARN_ON(cluster != 0);
cluster 38 arch/mips/kernel/smp-cps.c static unsigned core_vpe_count(unsigned int cluster, unsigned core)
cluster 43 arch/mips/kernel/smp-cps.c return mips_cps_numvps(cluster, core);
cluster 101 arch/x86/kernel/apic/x2apic_cluster.c u32 cluster, apicid = apic_read(APIC_LDR);
cluster 109 arch/x86/kernel/apic/x2apic_cluster.c cluster = apicid >> 16;
cluster 113 arch/x86/kernel/apic/x2apic_cluster.c if (cmsk && cmsk->clusterid == cluster)
cluster 117 arch/x86/kernel/apic/x2apic_cluster.c cmsk->clusterid = cluster;
cluster 131 arch/x86/kvm/lapic.c u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
cluster 141 arch/x86/kvm/lapic.c *cluster = &map->phys_map[offset];
cluster 150 arch/x86/kvm/lapic.c *cluster = map->xapic_flat_map;
cluster 154 arch/x86/kvm/lapic.c *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
cluster 194 arch/x86/kvm/lapic.c struct kvm_lapic **cluster;
cluster 232 arch/x86/kvm/lapic.c if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
cluster 236 arch/x86/kvm/lapic.c cluster[ffs(mask) - 1] = apic;
cluster 4598 arch/x86/kvm/svm.c int cluster = (dlid & 0xf0) >> 4;
cluster 4602 arch/x86/kvm/svm.c (cluster >= 0xf))
cluster 4604 arch/x86/kvm/svm.c index = (cluster << 2) + apic;
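The x2apic_cluster.c hits recover the logical cluster with a plain shift: in x2APIC mode the logical destination register (LDR) packs a 16-bit cluster id above a 16-bit one-hot position within the cluster. A sketch of the architectural derivation (cluster = apicid >> 4, position = apicid & 0xf):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * x2APIC logical ID layout: bits 31:16 hold the cluster id,
     * bits 15:0 a one-hot position (up to 16 CPUs per cluster).
     */
    static uint32_t x2apic_ldr(uint32_t apicid)
    {
        return ((apicid >> 4) << 16) | (1u << (apicid & 0xf));
    }

    int main(void)
    {
        uint32_t apicid = 0x23;        /* cluster 2, position 3 */
        uint32_t ldr = x2apic_ldr(apicid);
        uint32_t cluster = ldr >> 16;  /* same shift as x2apic_cluster.c */
        uint32_t pos_mask = ldr & 0xffff;

        printf("ldr=%#x cluster=%u mask=%#x\n", ldr, cluster, pos_mask);
        return 0;
    }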
cluster 318 drivers/base/arch_topology.c static int __init parse_cluster(struct device_node *cluster, int depth)
cluster 336 drivers/base/arch_topology.c c = of_get_child_by_name(cluster, name);
cluster 351 drivers/base/arch_topology.c c = of_get_child_by_name(cluster, name);
cluster 366 drivers/base/arch_topology.c cluster, name);
cluster 378 drivers/base/arch_topology.c pr_warn("%pOF: empty cluster\n", cluster);
cluster 132 drivers/block/paride/pd.c static int cluster = 64;
cluster 167 drivers/block/paride/pd.c module_param(cluster, int, 0);
cluster 924 drivers/block/paride/pd.c blk_queue_max_hw_sectors(p->queue, cluster);
cluster 1010 drivers/block/paride/pd.c name, name, PD_VERSION, major, cluster, nice);
cluster 132 drivers/block/paride/pf.c static int cluster = 64;
cluster 166 drivers/block/paride/pf.c module_param(cluster, int, 0);
cluster 310 drivers/block/paride/pf.c blk_queue_max_segments(disk->queue, cluster);
cluster 728 drivers/block/paride/pf.c name, name, PF_VERSION, major, cluster, nice);
cluster 136 drivers/clk/mvebu/ap-cpu-clk.c unsigned int cluster;
cluster 152 drivers/clk/mvebu/ap-cpu-clk.c (clk->cluster * clk->pll_regs->cluster_offset);
cluster 168 drivers/clk/mvebu/ap-cpu-clk.c (clk->cluster * clk->pll_regs->cluster_offset);
cluster 170 drivers/clk/mvebu/ap-cpu-clk.c (clk->cluster * clk->pll_regs->cluster_offset);
cluster 172 drivers/clk/mvebu/ap-cpu-clk.c (clk->cluster * clk->pll_regs->cluster_offset);
cluster 199 drivers/clk/mvebu/ap-cpu-clk.c clk->cluster *
cluster 310 drivers/clk/mvebu/ap-cpu-clk.c ap_cpu_clk[cluster_index].cluster = cluster_index;
cluster 82 drivers/clocksource/timer-nps.c int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET;
cluster 84 drivers/clocksource/timer-nps.c return (u64)ioread32be(nps_msu_reg_low_addr[cluster]);
cluster 89 drivers/clocksource/timer-nps.c int ret, cluster;
cluster 94 drivers/clocksource/timer-nps.c for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++)
cluster 95 drivers/clocksource/timer-nps.c nps_msu_reg_low_addr[cluster] =
cluster 96 drivers/clocksource/timer-nps.c nps_host_reg((cluster << NPS_CLUSTER_OFFSET),
cluster 56 drivers/cpufreq/arm_big_little.c #define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
cluster 57 drivers/cpufreq/arm_big_little.c #define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
cluster 84 drivers/cpufreq/arm_big_little.c static unsigned int find_cluster_maxfreq(int cluster)
cluster 92 drivers/cpufreq/arm_big_little.c if ((cluster == per_cpu(physical_cluster, j)) &&
cluster 97 drivers/cpufreq/arm_big_little.c pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
cluster 312 drivers/cpufreq/arm_big_little.c u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
cluster 314 drivers/cpufreq/arm_big_little.c if (!freq_table[cluster])
cluster 317 drivers/cpufreq/arm_big_little.c clk_put(clk[cluster]);
cluster 318 drivers/cpufreq/arm_big_little.c dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
cluster 321 drivers/cpufreq/arm_big_little.c dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
cluster 327 drivers/cpufreq/arm_big_little.c u32 cluster = cpu_to_cluster(cpu_dev->id);
cluster 330 drivers/cpufreq/arm_big_little.c if (atomic_dec_return(&cluster_usage[cluster]))
cluster 333 drivers/cpufreq/arm_big_little.c if (cluster < MAX_CLUSTERS)
cluster 347 drivers/cpufreq/arm_big_little.c kfree(freq_table[cluster]);
cluster 353 drivers/cpufreq/arm_big_little.c u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
cluster 356 drivers/cpufreq/arm_big_little.c if (freq_table[cluster])
cluster 366 drivers/cpufreq/arm_big_little.c ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
cluster 373 drivers/cpufreq/arm_big_little.c clk[cluster] = clk_get(cpu_dev, NULL);
cluster 374 drivers/cpufreq/arm_big_little.c if (!IS_ERR(clk[cluster])) {
cluster 376 drivers/cpufreq/arm_big_little.c __func__, clk[cluster], freq_table[cluster],
cluster 377 drivers/cpufreq/arm_big_little.c cluster);
cluster 382 drivers/cpufreq/arm_big_little.c __func__, cpu_dev->id, cluster);
cluster 383 drivers/cpufreq/arm_big_little.c ret = PTR_ERR(clk[cluster]);
cluster 384 drivers/cpufreq/arm_big_little.c dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
cluster 391 drivers/cpufreq/arm_big_little.c cluster);
cluster 398 drivers/cpufreq/arm_big_little.c u32 cluster = cpu_to_cluster(cpu_dev->id);
cluster 401 drivers/cpufreq/arm_big_little.c if (atomic_inc_return(&cluster_usage[cluster]) != 1)
cluster 404 drivers/cpufreq/arm_big_little.c if (cluster < MAX_CLUSTERS) {
cluster 407 drivers/cpufreq/arm_big_little.c atomic_dec(&cluster_usage[cluster]);
cluster 436 drivers/cpufreq/arm_big_little.c __func__, cluster, clk_big_min, clk_little_max);
cluster 451 drivers/cpufreq/arm_big_little.c atomic_dec(&cluster_usage[cluster]);
cluster 59 drivers/cpufreq/tegra186-cpufreq.c struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
cluster 61 drivers/cpufreq/tegra186-cpufreq.c cluster->info;
cluster 73 drivers/cpufreq/tegra186-cpufreq.c policy->freq_table = cluster->table;
cluster 216 drivers/cpufreq/tegra186-cpufreq.c struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
cluster 218 drivers/cpufreq/tegra186-cpufreq.c cluster->info = &tegra186_clusters[i];
cluster 219 drivers/cpufreq/tegra186-cpufreq.c cluster->table = init_vhint_table(
cluster 220 drivers/cpufreq/tegra186-cpufreq.c pdev, bpmp, cluster->info->bpmp_cluster_id);
cluster 221 drivers/cpufreq/tegra186-cpufreq.c if (IS_ERR(cluster->table)) {
cluster 222 drivers/cpufreq/tegra186-cpufreq.c err = PTR_ERR(cluster->table);
cluster 104 drivers/cpuidle/cpuidle-big_little.c unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
cluster 107 drivers/cpuidle/cpuidle-big_little.c mcpm_set_entry_vector(cpu, cluster, cpu_resume);
cluster 497 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c const struct a6xx_cluster *cluster,
cluster 507 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c if (cluster->sel_reg)
cluster 508 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c in += CRASHDUMP_WRITE(in, cluster->sel_reg, cluster->sel_val);
cluster 514 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c (cluster->id << 8) | (i << 4) | i);
cluster 516 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c for (j = 0; j < cluster->count; j += 2) {
cluster 517 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c int count = RANGE(cluster->registers, j);
cluster 519 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c in += CRASHDUMP_READ(in, cluster->registers[j],
cluster 539 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c obj->handle = cluster;
cluster 1047 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c const struct a6xx_cluster *cluster = obj->handle;
cluster 1049 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c if (cluster) {
cluster 1050 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c print_name(p, " - cluster-name: ", cluster->name);
cluster 1051 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c a6xx_show_cluster_data(cluster->registers, cluster->count,
cluster 590 drivers/md/dm-verity-target.c unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
cluster 592 drivers/md/dm-verity-target.c cluster >>= v->data_dev_block_bits;
cluster 593 drivers/md/dm-verity-target.c if (unlikely(!cluster))
cluster 596 drivers/md/dm-verity-target.c if (unlikely(cluster & (cluster - 1)))
cluster 597 drivers/md/dm-verity-target.c cluster = 1 << __fls(cluster);
cluster 599 drivers/md/dm-verity-target.c hash_block_start &= ~(sector_t)(cluster - 1);
cluster 600 drivers/md/dm-verity-target.c hash_block_end |= cluster - 1;
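The dm-verity hits implement prefetch-cluster alignment: a non-power-of-two cluster size is rounded down to a power of two (the 1 << __fls(cluster) step), then the hash block range is widened to cluster boundaries with mask arithmetic. A standalone sketch of the same arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    /* Round down to the highest power of two <= x (cf. 1 << __fls(cluster)). */
    static uint64_t round_down_pow2(uint64_t x)
    {
        while (x & (x - 1))  /* clear low set bits until one remains */
            x &= x - 1;
        return x;
    }

    int main(void)
    {
        uint64_t cluster = 24;  /* blocks to prefetch; not a power of two */
        uint64_t start = 1000, end = 1003;

        if (cluster & (cluster - 1))
            cluster = round_down_pow2(cluster);  /* 24 -> 16 */

        /* widen [start, end] to cluster-aligned boundaries, as dm-verity does */
        start &= ~(cluster - 1);  /* 1000 -> 992 */
        end |= cluster - 1;       /* 1003 -> 1007 */

        printf("cluster=%llu range=[%llu, %llu]\n",
               (unsigned long long)cluster,
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }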
cur_to_new(master->cluster[j]); cluster 3592 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ctrl *master = ctrl->cluster[0]; cluster 3610 drivers/media/v4l2-core/v4l2-ctrls.c cur_to_new(master->cluster[i]); cluster 3674 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ctrl *ctrl = master->cluster[i]; cluster 3710 drivers/media/v4l2-core/v4l2-ctrls.c master->has_volatiles && master->cluster[i]) cluster 3711 drivers/media/v4l2-core/v4l2-ctrls.c master->cluster[i]->has_changed = true; cluster 3713 drivers/media/v4l2-core/v4l2-ctrls.c new_to_cur(fh, master->cluster[i], ch_flags | cluster 3777 drivers/media/v4l2-core/v4l2-ctrls.c cur_to_new(master->cluster[i]); cluster 3780 drivers/media/v4l2-core/v4l2-ctrls.c if (master->cluster[i]) cluster 3781 drivers/media/v4l2-core/v4l2-ctrls.c master->cluster[i]->is_new = 1; cluster 3839 drivers/media/v4l2-core/v4l2-ctrls.c if (master->cluster[j]) cluster 3840 drivers/media/v4l2-core/v4l2-ctrls.c master->cluster[j]->is_new = 0; cluster 3884 drivers/media/v4l2-core/v4l2-ctrls.c find_ref(hdl, master->cluster[j]->id); cluster 3996 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ctrl *master = ctrl->cluster[0]; cluster 4002 drivers/media/v4l2-core/v4l2-ctrls.c if (master->cluster[i]) cluster 4003 drivers/media/v4l2-core/v4l2-ctrls.c master->cluster[i]->is_new = 0; cluster 4110 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ctrl *master = ctrl->cluster[0]; cluster 4119 drivers/media/v4l2-core/v4l2-ctrls.c cur_to_new(master->cluster[i]); cluster 4179 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ctrl *master = ctrl->cluster[0]; cluster 4193 drivers/media/v4l2-core/v4l2-ctrls.c if (master->cluster[i]) { cluster 4195 drivers/media/v4l2-core/v4l2-ctrls.c find_ref(hdl, master->cluster[i]->id); cluster 4209 drivers/media/v4l2-core/v4l2-ctrls.c if (master->cluster[i]) { cluster 4211 drivers/media/v4l2-core/v4l2-ctrls.c find_ref(hdl, master->cluster[i]->id); cluster 4214 drivers/media/v4l2-core/v4l2-ctrls.c master->cluster[i]->is_new = 1; cluster 121 drivers/memstick/core/mspro_block.c unsigned short cluster; cluster 464 drivers/memstick/core/mspro_block.c x_spfile->cluster); cluster 2209 drivers/net/ethernet/sun/cassini.c int cluster; cluster 2217 drivers/net/ethernet/sun/cassini.c cluster = -1; cluster 2247 drivers/net/ethernet/sun/cassini.c cluster = entry; cluster 2255 drivers/net/ethernet/sun/cassini.c if (cluster < 0) cluster 2259 drivers/net/ethernet/sun/cassini.c writel(cluster, cp->regs + REG_RX_KICK); cluster 2262 drivers/net/ethernet/sun/cassini.c writel(cluster, cp->regs + REG_PLUS_RX_KICK1); cluster 285 drivers/perf/qcom_l2_pmu.c static void cluster_pmu_set_resr(struct cluster_pmu *cluster, cluster 296 drivers/perf/qcom_l2_pmu.c spin_lock_irqsave(&cluster->pmu_lock, flags); cluster 304 drivers/perf/qcom_l2_pmu.c spin_unlock_irqrestore(&cluster->pmu_lock, flags); cluster 361 drivers/perf/qcom_l2_pmu.c static void l2_cache_cluster_set_period(struct cluster_pmu *cluster, cluster 381 drivers/perf/qcom_l2_pmu.c static int l2_cache_get_event_idx(struct cluster_pmu *cluster, cluster 386 drivers/perf/qcom_l2_pmu.c int num_ctrs = cluster->l2cache_pmu->num_counters - 1; cluster 390 drivers/perf/qcom_l2_pmu.c if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters)) cluster 396 drivers/perf/qcom_l2_pmu.c idx = find_first_zero_bit(cluster->used_counters, num_ctrs); cluster 407 drivers/perf/qcom_l2_pmu.c if (test_bit(group, cluster->used_groups)) cluster 410 drivers/perf/qcom_l2_pmu.c set_bit(idx, cluster->used_counters); cluster 411 
drivers/perf/qcom_l2_pmu.c set_bit(group, cluster->used_groups); cluster 416 drivers/perf/qcom_l2_pmu.c static void l2_cache_clear_event_idx(struct cluster_pmu *cluster, cluster 422 drivers/perf/qcom_l2_pmu.c clear_bit(idx, cluster->used_counters); cluster 424 drivers/perf/qcom_l2_pmu.c clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups); cluster 429 drivers/perf/qcom_l2_pmu.c struct cluster_pmu *cluster = data; cluster 430 drivers/perf/qcom_l2_pmu.c int num_counters = cluster->l2cache_pmu->num_counters; cluster 438 drivers/perf/qcom_l2_pmu.c for_each_set_bit(idx, cluster->used_counters, num_counters) { cluster 439 drivers/perf/qcom_l2_pmu.c struct perf_event *event = cluster->events[idx]; cluster 451 drivers/perf/qcom_l2_pmu.c l2_cache_cluster_set_period(cluster, hwc); cluster 483 drivers/perf/qcom_l2_pmu.c struct cluster_pmu *cluster; cluster 530 drivers/perf/qcom_l2_pmu.c cluster = get_cluster_pmu(l2cache_pmu, event->cpu); cluster 531 drivers/perf/qcom_l2_pmu.c if (!cluster) { cluster 540 drivers/perf/qcom_l2_pmu.c (cluster->on_cpu != event->group_leader->cpu)) { cluster 578 drivers/perf/qcom_l2_pmu.c event->cpu = cluster->on_cpu; cluster 585 drivers/perf/qcom_l2_pmu.c struct cluster_pmu *cluster; cluster 593 drivers/perf/qcom_l2_pmu.c cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); cluster 595 drivers/perf/qcom_l2_pmu.c l2_cache_cluster_set_period(cluster, hwc); cluster 606 drivers/perf/qcom_l2_pmu.c cluster_pmu_set_resr(cluster, event_group, event_cc); cluster 635 drivers/perf/qcom_l2_pmu.c struct cluster_pmu *cluster; cluster 637 drivers/perf/qcom_l2_pmu.c cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); cluster 639 drivers/perf/qcom_l2_pmu.c idx = l2_cache_get_event_idx(cluster, event); cluster 645 drivers/perf/qcom_l2_pmu.c cluster->events[idx] = event; cluster 660 drivers/perf/qcom_l2_pmu.c struct cluster_pmu *cluster; cluster 663 drivers/perf/qcom_l2_pmu.c cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); cluster 666 drivers/perf/qcom_l2_pmu.c cluster->events[idx] = NULL; cluster 667 drivers/perf/qcom_l2_pmu.c l2_cache_clear_event_idx(cluster, event); cluster 784 drivers/perf/qcom_l2_pmu.c struct cluster_pmu *cluster = NULL; cluster 797 drivers/perf/qcom_l2_pmu.c list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { cluster 798 drivers/perf/qcom_l2_pmu.c if (cluster->cluster_id != cpu_cluster_id) cluster 803 drivers/perf/qcom_l2_pmu.c cluster->cluster_id); cluster 804 drivers/perf/qcom_l2_pmu.c cpumask_set_cpu(cpu, &cluster->cluster_cpus); cluster 805 drivers/perf/qcom_l2_pmu.c *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; cluster 809 drivers/perf/qcom_l2_pmu.c return cluster; cluster 814 drivers/perf/qcom_l2_pmu.c struct cluster_pmu *cluster; cluster 818 drivers/perf/qcom_l2_pmu.c cluster = get_cluster_pmu(l2cache_pmu, cpu); cluster 819 drivers/perf/qcom_l2_pmu.c if (!cluster) { cluster 821 drivers/perf/qcom_l2_pmu.c cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu); cluster 822 drivers/perf/qcom_l2_pmu.c if (!cluster) { cluster 830 drivers/perf/qcom_l2_pmu.c if (cluster->on_cpu != -1) cluster 837 drivers/perf/qcom_l2_pmu.c cluster->on_cpu = cpu; cluster 841 drivers/perf/qcom_l2_pmu.c WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu))); cluster 842 drivers/perf/qcom_l2_pmu.c enable_irq(cluster->irq); cluster 849 drivers/perf/qcom_l2_pmu.c struct cluster_pmu *cluster; cluster 855 drivers/perf/qcom_l2_pmu.c cluster = get_cluster_pmu(l2cache_pmu, cpu); cluster 856 
cluster 221 drivers/pinctrl/nomadik/pinctrl-abx500.c struct abx500_gpio_irq_cluster *cluster =
cluster 224 drivers/pinctrl/nomadik/pinctrl-abx500.c if (gpio >= cluster->start && gpio <= cluster->end) {
cluster 231 drivers/pinctrl/nomadik/pinctrl-abx500.c hwirq = gpio - cluster->start + cluster->to_irq;
cluster 1366 drivers/scsi/aacraid/aacraid.h __le32 cluster;
cluster 1320 drivers/scsi/megaraid/megaraid_sas.h } cluster;
cluster 6247 drivers/scsi/megaraid/megaraid_sas_base.c instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
cluster 6248 drivers/scsi/megaraid/megaraid_sas_base.c instance->passive = ctrl_info->cluster.passive;
cluster 3082 drivers/staging/exfat/exfat_super.c unsigned int cluster;
cluster 3115 drivers/staging/exfat/exfat_super.c err = ffsMapCluster(inode, clu_offset, &cluster);
cluster 3122 drivers/staging/exfat/exfat_super.c } else if (cluster != CLUSTER_32(~0)) {
cluster 3123 drivers/staging/exfat/exfat_super.c *phys = START_SECTOR(cluster) + sec_offset;
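
The staging exFAT hits map a file-relative cluster to an absolute sector: ffsMapCluster() resolves the cluster chain, CLUSTER_32(~0) means "no cluster here", and START_SECTOR() turns a cluster index into its first sector. A standalone sketch of that last step, assuming the usual FAT-family convention that the data area begins at cluster 2 (the geometry constants are invented):

    #include <stdio.h>
    #include <stdint.h>

    #define EXFAT_FIRST_CLUSTER 2          /* data area starts at cluster 2 */
    #define CLUSTER_INVALID     UINT32_MAX /* stands in for CLUSTER_32(~0) */

    /* Illustrative geometry; the driver reads these from the superblock. */
    static const uint64_t data_start_sector = 8192;
    static const unsigned sect_per_clus_bits = 3;  /* 8 sectors per cluster */

    /* Analogue of START_SECTOR(): first sector of a data cluster. */
    static uint64_t start_sector(uint32_t cluster)
    {
        return data_start_sector +
               ((uint64_t)(cluster - EXFAT_FIRST_CLUSTER) << sect_per_clus_bits);
    }

    int main(void)
    {
        uint32_t cluster = 10;
        uint64_t sec_offset = 3;

        if (cluster != CLUSTER_INVALID)
            printf("cluster %u, offset %llu -> sector %llu\n", cluster,
                   (unsigned long long)sec_offset,
                   (unsigned long long)(start_sector(cluster) + sec_offset));
        return 0;
    }
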
cluster 866 fs/btrfs/block-group.c struct btrfs_free_cluster *cluster;
cluster 896 fs/btrfs/block-group.c cluster = &fs_info->data_alloc_cluster;
cluster 897 fs/btrfs/block-group.c spin_lock(&cluster->refill_lock);
cluster 898 fs/btrfs/block-group.c btrfs_return_cluster_to_free_space(block_group, cluster);
cluster 899 fs/btrfs/block-group.c spin_unlock(&cluster->refill_lock);
cluster 905 fs/btrfs/block-group.c cluster = &fs_info->meta_alloc_cluster;
cluster 906 fs/btrfs/block-group.c spin_lock(&cluster->refill_lock);
cluster 907 fs/btrfs/block-group.c btrfs_return_cluster_to_free_space(block_group, cluster);
cluster 908 fs/btrfs/block-group.c spin_unlock(&cluster->refill_lock);
cluster 2791 fs/btrfs/extent-tree.c struct btrfs_free_cluster *cluster = NULL;
cluster 2807 fs/btrfs/extent-tree.c cluster = fetch_cluster_info(fs_info,
cluster 2832 fs/btrfs/extent-tree.c if (cluster && cluster->fragmented &&
cluster 2834 fs/btrfs/extent-tree.c spin_lock(&cluster->lock);
cluster 2835 fs/btrfs/extent-tree.c cluster->fragmented = 0;
cluster 2836 fs/btrfs/extent-tree.c spin_unlock(&cluster->lock);
cluster 3371 fs/btrfs/extent-tree.c struct btrfs_free_cluster *cluster,
cluster 3376 fs/btrfs/extent-tree.c spin_lock(&cluster->refill_lock);
cluster 3378 fs/btrfs/extent-tree.c used_bg = cluster->block_group;
cluster 3393 fs/btrfs/extent-tree.c spin_unlock(&cluster->refill_lock);
cluster 3398 fs/btrfs/extent-tree.c spin_lock(&cluster->refill_lock);
cluster 3399 fs/btrfs/extent-tree.c if (used_bg == cluster->block_group)
cluster 927 fs/btrfs/free-space-cache.c struct btrfs_free_cluster *cluster = NULL;
cluster 934 fs/btrfs/free-space-cache.c cluster = list_entry(block_group->cluster_list.next,
cluster 939 fs/btrfs/free-space-cache.c if (!node && cluster) {
cluster 940 fs/btrfs/free-space-cache.c cluster_locked = cluster;
cluster 942 fs/btrfs/free-space-cache.c node = rb_first(&cluster->root);
cluster 943 fs/btrfs/free-space-cache.c cluster = NULL;
cluster 963 fs/btrfs/free-space-cache.c if (!node && cluster) {
cluster 964 fs/btrfs/free-space-cache.c node = rb_first(&cluster->root);
cluster 965 fs/btrfs/free-space-cache.c cluster_locked = cluster;
cluster 967 fs/btrfs/free-space-cache.c cluster = NULL;
cluster 2072 fs/btrfs/free-space-cache.c struct btrfs_free_cluster *cluster;
cluster 2076 fs/btrfs/free-space-cache.c cluster = list_entry(block_group->cluster_list.next,
cluster 2079 fs/btrfs/free-space-cache.c spin_lock(&cluster->lock);
cluster 2080 fs/btrfs/free-space-cache.c node = rb_first(&cluster->root);
cluster 2082 fs/btrfs/free-space-cache.c spin_unlock(&cluster->lock);
cluster 2088 fs/btrfs/free-space-cache.c spin_unlock(&cluster->lock);
cluster 2098 fs/btrfs/free-space-cache.c spin_unlock(&cluster->lock);
cluster 2542 fs/btrfs/free-space-cache.c struct btrfs_free_cluster *cluster)
cluster 2548 fs/btrfs/free-space-cache.c spin_lock(&cluster->lock);
cluster 2549 fs/btrfs/free-space-cache.c if (cluster->block_group != block_group)
cluster 2552 fs/btrfs/free-space-cache.c cluster->block_group = NULL;
cluster 2553 fs/btrfs/free-space-cache.c cluster->window_start = 0;
cluster 2554 fs/btrfs/free-space-cache.c list_del_init(&cluster->block_group_list);
cluster 2556 fs/btrfs/free-space-cache.c node = rb_first(&cluster->root);
cluster 2562 fs/btrfs/free-space-cache.c rb_erase(&entry->offset_index, &cluster->root);
cluster 2573 fs/btrfs/free-space-cache.c cluster->root = RB_ROOT;
cluster 2576 fs/btrfs/free-space-cache.c spin_unlock(&cluster->lock);
cluster 2610 fs/btrfs/free-space-cache.c struct btrfs_free_cluster *cluster;
cluster 2616 fs/btrfs/free-space-cache.c cluster = list_entry(head, struct btrfs_free_cluster,
cluster 2619 fs/btrfs/free-space-cache.c WARN_ON(cluster->block_group != block_group);
cluster 2620 fs/btrfs/free-space-cache.c __btrfs_return_cluster_to_free_space(block_group, cluster);
cluster 2684 fs/btrfs/free-space-cache.c struct btrfs_free_cluster *cluster)
cluster 2690 fs/btrfs/free-space-cache.c spin_lock(&cluster->lock);
cluster 2692 fs/btrfs/free-space-cache.c block_group = cluster->block_group;
cluster 2694 fs/btrfs/free-space-cache.c spin_unlock(&cluster->lock);
cluster 2697 fs/btrfs/free-space-cache.c } else if (cluster->block_group != block_group) {
cluster 2699 fs/btrfs/free-space-cache.c spin_unlock(&cluster->lock);
cluster 2703 fs/btrfs/free-space-cache.c spin_unlock(&cluster->lock);
cluster 2709 fs/btrfs/free-space-cache.c ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
cluster 2718 fs/btrfs/free-space-cache.c struct btrfs_free_cluster *cluster,
cluster 2725 fs/btrfs/free-space-cache.c u64 search_start = cluster->window_start;
cluster 2751 fs/btrfs/free-space-cache.c struct btrfs_free_cluster *cluster, u64 bytes,
cluster 2759 fs/btrfs/free-space-cache.c spin_lock(&cluster->lock);
cluster 2760 fs/btrfs/free-space-cache.c if (bytes > cluster->max_size)
cluster 2763 fs/btrfs/free-space-cache.c if (cluster->block_group != block_group)
cluster 2766 fs/btrfs/free-space-cache.c node = rb_first(&cluster->root);
cluster 2788 fs/btrfs/free-space-cache.c cluster, entry, bytes,
cluster 2789 fs/btrfs/free-space-cache.c cluster->window_start,
cluster 2799 fs/btrfs/free-space-cache.c cluster->window_start += bytes;
cluster 2808 fs/btrfs/free-space-cache.c rb_erase(&entry->offset_index, &cluster->root);
cluster 2812 fs/btrfs/free-space-cache.c spin_unlock(&cluster->lock);
cluster 2838 fs/btrfs/free-space-cache.c struct btrfs_free_cluster *cluster,
cluster 2888 fs/btrfs/free-space-cache.c cluster->max_size = 0;
cluster 2893 fs/btrfs/free-space-cache.c if (cluster->max_size < found_bits * ctl->unit)
cluster 2894 fs/btrfs/free-space-cache.c cluster->max_size = found_bits * ctl->unit;
cluster 2896 fs/btrfs/free-space-cache.c if (total_found < want_bits || cluster->max_size < cont1_bytes) {
cluster 2901 fs/btrfs/free-space-cache.c cluster->window_start = start * ctl->unit + entry->offset;
cluster 2903 fs/btrfs/free-space-cache.c ret = tree_insert_offset(&cluster->root, entry->offset,
cluster 2907 fs/btrfs/free-space-cache.c trace_btrfs_setup_cluster(block_group, cluster,
cluster 2919 fs/btrfs/free-space-cache.c struct btrfs_free_cluster *cluster,
cluster 2976 fs/btrfs/free-space-cache.c cluster->window_start = first->offset;
cluster 2993 fs/btrfs/free-space-cache.c ret = tree_insert_offset(&cluster->root, entry->offset,
cluster 2999 fs/btrfs/free-space-cache.c cluster->max_size = max_extent;
cluster 3000 fs/btrfs/free-space-cache.c trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
cluster 3010 fs/btrfs/free-space-cache.c struct btrfs_free_cluster *cluster,
cluster 3038 fs/btrfs/free-space-cache.c ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
cluster 3060 fs/btrfs/free-space-cache.c struct btrfs_free_cluster *cluster,
cluster 3098 fs/btrfs/free-space-cache.c spin_lock(&cluster->lock);
cluster 3101 fs/btrfs/free-space-cache.c if (cluster->block_group) {
cluster 3109 fs/btrfs/free-space-cache.c ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
cluster 3113 fs/btrfs/free-space-cache.c ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
cluster 3123 fs/btrfs/free-space-cache.c list_add_tail(&cluster->block_group_list,
cluster 3125 fs/btrfs/free-space-cache.c cluster->block_group = block_group;
cluster 3130 fs/btrfs/free-space-cache.c spin_unlock(&cluster->lock);
cluster 3139 fs/btrfs/free-space-cache.c void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
cluster 3141 fs/btrfs/free-space-cache.c spin_lock_init(&cluster->lock);
cluster 3142 fs/btrfs/free-space-cache.c spin_lock_init(&cluster->refill_lock);
cluster 3143 fs/btrfs/free-space-cache.c cluster->root = RB_ROOT;
cluster 3144 fs/btrfs/free-space-cache.c cluster->max_size = 0;
cluster 3145 fs/btrfs/free-space-cache.c cluster->fragmented = false;
cluster 3146 fs/btrfs/free-space-cache.c INIT_LIST_HEAD(&cluster->block_group_list);
cluster 3147 fs/btrfs/free-space-cache.c cluster->block_group = NULL;
cluster 102 fs/btrfs/free-space-cache.h struct btrfs_free_cluster *cluster,
cluster 104 fs/btrfs/free-space-cache.h void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster);
cluster 106 fs/btrfs/free-space-cache.h struct btrfs_free_cluster *cluster, u64 bytes,
cluster 110 fs/btrfs/free-space-cache.h struct btrfs_free_cluster *cluster);
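
Taken together, the btrfs hits show a struct btrfs_free_cluster's life cycle: btrfs_init_free_cluster() sets up the two locks, an empty rb-root and a detached list head, and returning a cluster hands every borrowed entry back to its block group under the cluster lock. A compressed userspace model of that hand-back (pthread mutexes for the spinlocks, a singly linked list for the rb-tree, and one fixed lock nesting chosen for the sketch):

    #include <stdio.h>
    #include <pthread.h>

    struct free_entry {
        unsigned long long offset, bytes;
        struct free_entry *next;
    };

    struct block_group {
        pthread_mutex_t tree_lock;
        struct free_entry *free_space;  /* stands in for the free-space ctl */
    };

    /* Shape follows btrfs_free_cluster in the listing: a lock for the entry
     * tree, a window start, and a back-pointer to the owning block group. */
    struct free_cluster {
        pthread_mutex_t lock;
        unsigned long long window_start;
        struct free_entry *entries;     /* stands in for cluster->root */
        struct block_group *block_group;
    };

    /* Analogue of __btrfs_return_cluster_to_free_space(): detach the cluster
     * from its block group and give every borrowed entry back. */
    static void return_cluster(struct block_group *bg, struct free_cluster *c)
    {
        pthread_mutex_lock(&bg->tree_lock);
        pthread_mutex_lock(&c->lock);
        if (c->block_group == bg) {
            c->block_group = NULL;
            c->window_start = 0;
            while (c->entries) {
                struct free_entry *e = c->entries;

                c->entries = e->next;
                e->next = bg->free_space;
                bg->free_space = e;
            }
        }
        pthread_mutex_unlock(&c->lock);
        pthread_mutex_unlock(&bg->tree_lock);
    }

    int main(void)
    {
        struct block_group bg = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct free_cluster c = { PTHREAD_MUTEX_INITIALIZER, 4096, NULL, &bg };
        struct free_entry e = { 4096, 8192, NULL };
        int n = 0;

        c.entries = &e;
        return_cluster(&bg, &c);
        for (struct free_entry *p = bg.free_space; p; p = p->next)
            n++;
        printf("entries back in block group: %d\n", n);
        return 0;
    }
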
cluster 1406 fs/btrfs/ioctl.c unsigned long cluster = max_cluster;
cluster 1508 fs/btrfs/ioctl.c cluster = (PAGE_ALIGN(defrag_end) >>
cluster 1510 fs/btrfs/ioctl.c cluster = min(cluster, max_cluster);
cluster 1512 fs/btrfs/ioctl.c cluster = max_cluster;
cluster 1515 fs/btrfs/ioctl.c if (i + cluster > ra_index) {
cluster 1519 fs/btrfs/ioctl.c file, ra_index, cluster);
cluster 1520 fs/btrfs/ioctl.c ra_index += cluster;
cluster 1529 fs/btrfs/ioctl.c ret = cluster_pages_for_defrag(inode, pages, i, cluster);
cluster 160 fs/btrfs/relocation.c struct file_extent_cluster cluster;
cluster 3197 fs/btrfs/relocation.c struct file_extent_cluster *cluster)
cluster 3206 fs/btrfs/relocation.c u64 prealloc_start = cluster->start - offset;
cluster 3207 fs/btrfs/relocation.c u64 prealloc_end = cluster->end - offset;
cluster 3211 fs/btrfs/relocation.c BUG_ON(cluster->start != cluster->boundary[0]);
cluster 3220 fs/btrfs/relocation.c while (nr < cluster->nr) {
cluster 3221 fs/btrfs/relocation.c start = cluster->boundary[nr] - offset;
cluster 3222 fs/btrfs/relocation.c if (nr + 1 < cluster->nr)
cluster 3223 fs/btrfs/relocation.c end = cluster->boundary[nr + 1] - 1 - offset;
cluster 3225 fs/btrfs/relocation.c end = cluster->end - offset;
cluster 3286 fs/btrfs/relocation.c struct file_extent_cluster *cluster)
cluster 3300 fs/btrfs/relocation.c if (!cluster->nr)
cluster 3307 fs/btrfs/relocation.c ret = prealloc_file_extent_cluster(inode, cluster);
cluster 3313 fs/btrfs/relocation.c ret = setup_extent_mapping(inode, cluster->start - offset,
cluster 3314 fs/btrfs/relocation.c cluster->end - offset, cluster->start);
cluster 3318 fs/btrfs/relocation.c index = (cluster->start - offset) >> PAGE_SHIFT;
cluster 3319 fs/btrfs/relocation.c last_index = (cluster->end - offset) >> PAGE_SHIFT;
cluster 3371 fs/btrfs/relocation.c if (nr < cluster->nr &&
cluster 3372 fs/btrfs/relocation.c page_start + offset == cluster->boundary[nr]) {
cluster 3407 fs/btrfs/relocation.c WARN_ON(nr != cluster->nr);
cluster 3415 fs/btrfs/relocation.c struct file_extent_cluster *cluster)
cluster 3419 fs/btrfs/relocation.c if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
cluster 3420 fs/btrfs/relocation.c ret = relocate_file_extent_cluster(inode, cluster);
cluster 3423 fs/btrfs/relocation.c cluster->nr = 0;
cluster 3426 fs/btrfs/relocation.c if (!cluster->nr)
cluster 3427 fs/btrfs/relocation.c cluster->start = extent_key->objectid;
cluster 3429 fs/btrfs/relocation.c BUG_ON(cluster->nr >= MAX_EXTENTS);
cluster 3430 fs/btrfs/relocation.c cluster->end = extent_key->objectid + extent_key->offset - 1;
cluster 3431 fs/btrfs/relocation.c cluster->boundary[cluster->nr] = extent_key->objectid;
cluster 3432 fs/btrfs/relocation.c cluster->nr++;
cluster 3434 fs/btrfs/relocation.c if (cluster->nr >= MAX_EXTENTS) {
cluster 3435 fs/btrfs/relocation.c ret = relocate_file_extent_cluster(inode, cluster);
cluster 3438 fs/btrfs/relocation.c cluster->nr = 0;
cluster 4038 fs/btrfs/relocation.c memset(&rc->cluster, 0, sizeof(rc->cluster));
cluster 4175 fs/btrfs/relocation.c &key, &rc->cluster);
cluster 4201 fs/btrfs/relocation.c &rc->cluster);
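
The fs/btrfs/relocation.c hits trace a simple batching scheme: an extent that starts exactly at cluster->end + 1 is appended to boundary[], and the cluster is flushed either when contiguity breaks or when MAX_EXTENTS entries pile up. Modeled standalone (relocate() is a stub that prints instead of relocating; the MAX_EXTENTS value here is illustrative, the kernel's is larger):

    #include <stdio.h>

    #define MAX_EXTENTS 4 /* illustrative; the kernel uses a larger constant */

    struct extent_cluster {
        unsigned long long start, end;
        unsigned long long boundary[MAX_EXTENTS];
        int nr;
    };

    static void relocate(struct extent_cluster *c)
    {
        printf("relocate [%llu, %llu], %d extent(s)\n", c->start, c->end, c->nr);
        c->nr = 0;
    }

    /* Analogue of the accumulation step: flush on a gap, append, flush on
     * overflow. objectid/len play the role of the btrfs extent key. */
    static void add_extent(struct extent_cluster *c,
                           unsigned long long objectid, unsigned long long len)
    {
        if (c->nr > 0 && objectid != c->end + 1)
            relocate(c);
        if (!c->nr)
            c->start = objectid;
        c->end = objectid + len - 1;
        c->boundary[c->nr++] = objectid;
        if (c->nr >= MAX_EXTENTS)
            relocate(c);
    }

    int main(void)
    {
        struct extent_cluster c = { 0, 0, { 0 }, 0 };

        add_extent(&c, 100, 50);  /* starts a cluster */
        add_extent(&c, 150, 10);  /* contiguous: joins it */
        add_extent(&c, 500, 20);  /* gap: flushes, then starts anew */
        if (c.nr)
            relocate(&c);         /* final flush */
        return 0;
    }
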
cluster 423 fs/dlm/lockspace.c static int new_lockspace(const char *name, const char *cluster,
cluster 455 fs/dlm/lockspace.c if (!cluster)
cluster 459 fs/dlm/lockspace.c if (dlm_config.ci_recover_callbacks && cluster &&
cluster 460 fs/dlm/lockspace.c strncmp(cluster, dlm_config.ci_cluster_name, DLM_LOCKSPACE_LEN)) {
cluster 463 fs/dlm/lockspace.c dlm_config.ci_cluster_name, cluster);
cluster 696 fs/dlm/lockspace.c int dlm_new_lockspace(const char *name, const char *cluster,
cluster 709 fs/dlm/lockspace.c error = new_lockspace(name, cluster, flags, lvblen, ops, ops_arg,
cluster 287 fs/ext4/ext4.h #define EXT4_C2B(sbi, cluster) ((cluster) << (sbi)->s_cluster_bits)
cluster 2779 fs/ext4/mballoc.c ext4_group_t block_group, ext4_grpblk_t cluster, int count,
cluster 2784 fs/ext4/mballoc.c discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
cluster 4650 fs/ext4/mballoc.c ext4_grpblk_t cluster;
cluster 4664 fs/ext4/mballoc.c cluster = new_entry->efd_start_cluster;
cluster 4678 fs/ext4/mballoc.c if (cluster < entry->efd_start_cluster)
cluster 4680 fs/ext4/mballoc.c else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
cluster 4685 fs/ext4/mballoc.c EXT4_C2B(sbi, cluster),
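
fs/ext4/ext4.h's EXT4_C2B(), quoted in full above, converts a cluster number to a block number with one shift by s_cluster_bits. In isolation (the bigalloc geometry in main() is an assumption for the demo):

    #include <stdio.h>

    /* Mimics the listed macro: EXT4_C2B(sbi, cluster) is a left shift by the
     * superblock's cluster bits (log2 of blocks per cluster). */
    struct sbi { int s_cluster_bits; };
    #define EXT4_C2B(sbi, cluster) ((cluster) << (sbi)->s_cluster_bits)

    int main(void)
    {
        /* assumption for the example: bigalloc with 16 blocks per cluster */
        struct sbi sbi = { 4 };
        unsigned long long cluster = 10;

        printf("cluster %llu starts at block %llu\n",
               cluster, (unsigned long long)EXT4_C2B(&sbi, cluster));
        return 0;
    }
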
cluster 225 fs/fat/cache.c int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
cluster 244 fs/fat/cache.c if (cluster == 0)
cluster 247 fs/fat/cache.c if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
cluster 256 fs/fat/cache.c while (*fclus < cluster) {
cluster 291 fs/fat/cache.c static int fat_bmap_cluster(struct inode *inode, int cluster)
cluster 299 fs/fat/cache.c ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
cluster 316 fs/fat/cache.c int cluster, offset;
cluster 318 fs/fat/cache.c cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
cluster 320 fs/fat/cache.c cluster = fat_bmap_cluster(inode, cluster);
cluster 321 fs/fat/cache.c if (cluster < 0)
cluster 322 fs/fat/cache.c return cluster;
cluster 323 fs/fat/cache.c else if (cluster) {
cluster 324 fs/fat/cache.c *bmap = fat_clus_to_blknr(sbi, cluster) + offset;
cluster 1146 fs/fat/dir.c int err, cluster;
cluster 1148 fs/fat/dir.c err = fat_alloc_clusters(dir, &cluster, 1);
cluster 1152 fs/fat/dir.c blknr = fat_clus_to_blknr(sbi, cluster);
cluster 1181 fs/fat/dir.c fat_set_start(&de[0], cluster);
cluster 1193 fs/fat/dir.c return cluster;
cluster 1196 fs/fat/dir.c fat_free_clusters(dir, cluster);
cluster 1211 fs/fat/dir.c int err, i, n, offset, cluster[2];
cluster 1222 fs/fat/dir.c err = fat_alloc_clusters(dir, cluster, *nr_cluster);
cluster 1233 fs/fat/dir.c start_blknr = blknr = fat_clus_to_blknr(sbi, cluster[i]);
cluster 1271 fs/fat/dir.c return cluster[0];
cluster 1280 fs/fat/dir.c fat_free_clusters(dir, cluster[0]);
cluster 1373 fs/fat/dir.c int cluster, nr_cluster;
cluster 1380 fs/fat/dir.c cluster = fat_add_new_entries(dir, slots, nr_slots, &nr_cluster,
cluster 1382 fs/fat/dir.c if (cluster < 0) {
cluster 1383 fs/fat/dir.c err = cluster;
cluster 1386 fs/fat/dir.c err = fat_chain_add(dir, cluster, nr_cluster);
cluster 1388 fs/fat/dir.c fat_free_clusters(dir, cluster);
cluster 287 fs/fat/fat.h int cluster = le16_to_cpu(de->start);
cluster 289 fs/fat/fat.h cluster |= (le16_to_cpu(de->starthi) << 16);
cluster 290 fs/fat/fat.h return cluster;
cluster 293 fs/fat/fat.h static inline void fat_set_start(struct msdos_dir_entry *de, int cluster)
cluster 295 fs/fat/fat.h de->start = cpu_to_le16(cluster);
cluster 296 fs/fat/fat.h de->starthi = cpu_to_le16(cluster >> 16);
cluster 315 fs/fat/fat.h extern int fat_get_cluster(struct inode *inode, int cluster,
cluster 389 fs/fat/fat.h extern int fat_alloc_clusters(struct inode *inode, int *cluster,
cluster 391 fs/fat/fat.h extern int fat_free_clusters(struct inode *inode, int cluster);
cluster 464 fs/fat/fatent.c int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
cluster 511 fs/fat/fatent.c cluster[idx_clus] = entry;
cluster 547 fs/fat/fatent.c fat_free_clusters(inode, cluster[0]);
cluster 552 fs/fat/fatent.c int fat_free_clusters(struct inode *inode, int cluster)
cluster 560 fs/fat/fatent.c int first_cl = cluster, dirty_fsinfo = 0;
cluster 566 fs/fat/fatent.c cluster = fat_ent_read(inode, &fatent, cluster);
cluster 567 fs/fat/fatent.c if (cluster < 0) {
cluster 568 fs/fat/fatent.c err = cluster;
cluster 570 fs/fat/fatent.c } else if (cluster == FAT_ENT_FREE) {
cluster 583 fs/fat/fatent.c if (cluster != fatent.entry + 1) {
cluster 591 fs/fat/fatent.c first_cl = cluster;
cluster 615 fs/fat/fatent.c } while (cluster != FAT_ENT_EOF);
cluster 105 fs/fat/inode.c int err, cluster;
cluster 107 fs/fat/inode.c err = fat_alloc_clusters(inode, &cluster, 1);
cluster 112 fs/fat/inode.c err = fat_chain_add(inode, cluster, 1);
cluster 114 fs/fat/inode.c fat_free_clusters(inode, cluster);
cluster 228 fs/fat/namei_msdos.c int is_dir, int is_hid, int cluster,
cluster 247 fs/fat/namei_msdos.c fat_set_start(&de, cluster);
cluster 349 fs/fat/namei_msdos.c int err, is_hid, cluster;
cluster 366 fs/fat/namei_msdos.c cluster = fat_alloc_new_dir(dir, &ts);
cluster 367 fs/fat/namei_msdos.c if (cluster < 0) {
cluster 368 fs/fat/namei_msdos.c err = cluster;
cluster 371 fs/fat/namei_msdos.c err = msdos_add_entry(dir, msdos_name, 1, is_hid, cluster, &ts, &sinfo);
cluster 394 fs/fat/namei_msdos.c fat_free_clusters(dir, cluster);
cluster 580 fs/fat/namei_vfat.c int len, int is_dir, int cluster,
cluster 649 fs/fat/namei_vfat.c fat_set_start(de, cluster);
cluster 657 fs/fat/namei_vfat.c int is_dir, int cluster, struct timespec64 *ts,
cluster 672 fs/fat/namei_vfat.c err = vfat_build_slots(dir, qname->name, len, is_dir, cluster, ts,
cluster 855 fs/fat/namei_vfat.c int err, cluster;
cluster 860 fs/fat/namei_vfat.c cluster = fat_alloc_new_dir(dir, &ts);
cluster 861 fs/fat/namei_vfat.c if (cluster < 0) {
cluster 862 fs/fat/namei_vfat.c err = cluster;
cluster 865 fs/fat/namei_vfat.c err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &ts, &sinfo);
cluster 889 fs/fat/namei_vfat.c fat_free_clusters(dir, cluster);
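
fs/fat/fat.h's fat_get_start()/fat_set_start(), nearly fully quoted above, split a 32-bit starting cluster across the directory entry's 16-bit start and starthi fields (FAT32 uses both). The same round-trip, minus the le16 conversions (a little-endian host is assumed):

    #include <stdio.h>
    #include <stdint.h>

    /* Just the two fields the listed helpers touch. */
    struct msdos_dir_entry {
        uint16_t start;   /* low 16 bits of the first cluster */
        uint16_t starthi; /* high 16 bits (FAT32) */
    };

    static void fat_set_start(struct msdos_dir_entry *de, int cluster)
    {
        de->start = (uint16_t)cluster;
        de->starthi = (uint16_t)(cluster >> 16);
    }

    static int fat_get_start(const struct msdos_dir_entry *de)
    {
        return de->start | (de->starthi << 16);
    }

    int main(void)
    {
        struct msdos_dir_entry de;

        fat_set_start(&de, 0x12345);
        printf("round-trip: 0x%x\n", fat_get_start(&de)); /* 0x12345 */
        return 0;
    }
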
cluster 1198 fs/gfs2/lock_dlm.c char cluster[GFS2_LOCKNAME_LEN];
cluster 1232 fs/gfs2/lock_dlm.c memset(cluster, 0, sizeof(cluster));
cluster 1233 fs/gfs2/lock_dlm.c memcpy(cluster, table, strlen(table) - strlen(fsname));
cluster 1242 fs/gfs2/lock_dlm.c error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
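
fs/gfs2/lock_dlm.c derives the DLM cluster name from a "cluster:fsname" lock-table string: the memcpy length in the listing copies everything before the fsname suffix while fsname still points at the colon, so the colon is excluded. Standalone (GFS2_LOCKNAME_LEN is assumed to match the kernel's 64):

    #include <stdio.h>
    #include <string.h>

    #define GFS2_LOCKNAME_LEN 64 /* assumed to match the kernel's definition */

    int main(void)
    {
        const char *table = "mycluster:myfs";
        char cluster[GFS2_LOCKNAME_LEN];
        const char *fsname = strchr(table, ':');

        if (!fsname) {
            fprintf(stderr, "table has no ':' separator\n");
            return 1;
        }

        memset(cluster, 0, sizeof(cluster));
        /* fsname still points at the ':' here, so the copied prefix is
         * exactly the cluster name, colon excluded. */
        memcpy(cluster, table, strlen(table) - strlen(fsname));
        fsname++; /* skip the colon */

        printf("cluster '%s', fsname '%s'\n", cluster, fsname);
        return 0;
    }
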
cluster 51 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster = o2nm_single_cluster;
cluster 53 fs/ocfs2/cluster/nodemanager.c BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));
cluster 55 fs/ocfs2/cluster/nodemanager.c if (cluster == NULL)
cluster 58 fs/ocfs2/cluster/nodemanager.c read_lock(&cluster->cl_nodes_lock);
cluster 59 fs/ocfs2/cluster/nodemanager.c memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
cluster 60 fs/ocfs2/cluster/nodemanager.c read_unlock(&cluster->cl_nodes_lock);
cluster 66 fs/ocfs2/cluster/nodemanager.c static struct o2nm_node *o2nm_node_ip_tree_lookup(struct o2nm_cluster *cluster,
cluster 71 fs/ocfs2/cluster/nodemanager.c struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
cluster 104 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster = o2nm_single_cluster;
cluster 106 fs/ocfs2/cluster/nodemanager.c if (cluster == NULL)
cluster 109 fs/ocfs2/cluster/nodemanager.c read_lock(&cluster->cl_nodes_lock);
cluster 110 fs/ocfs2/cluster/nodemanager.c node = o2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
cluster 113 fs/ocfs2/cluster/nodemanager.c read_unlock(&cluster->cl_nodes_lock);
cluster 189 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster;
cluster 210 fs/ocfs2/cluster/nodemanager.c cluster = to_o2nm_cluster_from_node(node);
cluster 211 fs/ocfs2/cluster/nodemanager.c if (!cluster) {
cluster 216 fs/ocfs2/cluster/nodemanager.c write_lock(&cluster->cl_nodes_lock);
cluster 217 fs/ocfs2/cluster/nodemanager.c if (cluster->cl_nodes[tmp])
cluster 223 fs/ocfs2/cluster/nodemanager.c cluster->cl_nodes[tmp] = node;
cluster 225 fs/ocfs2/cluster/nodemanager.c set_bit(tmp, cluster->cl_nodes_bitmap);
cluster 227 fs/ocfs2/cluster/nodemanager.c write_unlock(&cluster->cl_nodes_lock);
cluster 273 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster;
cluster 291 fs/ocfs2/cluster/nodemanager.c cluster = to_o2nm_cluster_from_node(node);
cluster 292 fs/ocfs2/cluster/nodemanager.c if (!cluster) {
cluster 298 fs/ocfs2/cluster/nodemanager.c write_lock(&cluster->cl_nodes_lock);
cluster 299 fs/ocfs2/cluster/nodemanager.c if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
cluster 306 fs/ocfs2/cluster/nodemanager.c rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
cluster 308 fs/ocfs2/cluster/nodemanager.c write_unlock(&cluster->cl_nodes_lock);
cluster 328 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster;
cluster 347 fs/ocfs2/cluster/nodemanager.c cluster = to_o2nm_cluster_from_node(node);
cluster 348 fs/ocfs2/cluster/nodemanager.c if (!cluster) {
cluster 355 fs/ocfs2/cluster/nodemanager.c if (tmp && tmp == cluster->cl_has_local &&
cluster 356 fs/ocfs2/cluster/nodemanager.c cluster->cl_local_node != node->nd_num) {
cluster 362 fs/ocfs2/cluster/nodemanager.c if (tmp && !cluster->cl_has_local) {
cluster 368 fs/ocfs2/cluster/nodemanager.c if (!tmp && cluster->cl_has_local &&
cluster 369 fs/ocfs2/cluster/nodemanager.c cluster->cl_local_node == node->nd_num) {
cluster 371 fs/ocfs2/cluster/nodemanager.c cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
cluster 376 fs/ocfs2/cluster/nodemanager.c cluster->cl_has_local = tmp;
cluster 377 fs/ocfs2/cluster/nodemanager.c cluster->cl_local_node = node->nd_num;
cluster 455 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster = to_o2nm_cluster(item);
cluster 462 fs/ocfs2/cluster/nodemanager.c if (cluster->cl_idle_timeout_ms != val
cluster 470 fs/ocfs2/cluster/nodemanager.c } else if (val <= cluster->cl_keepalive_delay_ms) {
cluster 475 fs/ocfs2/cluster/nodemanager.c cluster->cl_idle_timeout_ms = val;
cluster 492 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster = to_o2nm_cluster(item);
cluster 499 fs/ocfs2/cluster/nodemanager.c if (cluster->cl_keepalive_delay_ms != val
cluster 507 fs/ocfs2/cluster/nodemanager.c } else if (val >= cluster->cl_idle_timeout_ms) {
cluster 512 fs/ocfs2/cluster/nodemanager.c cluster->cl_keepalive_delay_ms = val;
cluster 536 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster = to_o2nm_cluster(item);
cluster 539 fs/ocfs2/cluster/nodemanager.c if (cluster)
cluster 541 fs/ocfs2/cluster/nodemanager.c o2nm_fence_method_desc[cluster->cl_fence_method]);
cluster 608 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);
cluster 610 fs/ocfs2/cluster/nodemanager.c if (cluster->cl_nodes[node->nd_num] == node) {
cluster 613 fs/ocfs2/cluster/nodemanager.c if (cluster->cl_has_local &&
cluster 614 fs/ocfs2/cluster/nodemanager.c (cluster->cl_local_node == node->nd_num)) {
cluster 615 fs/ocfs2/cluster/nodemanager.c cluster->cl_has_local = 0;
cluster 616 fs/ocfs2/cluster/nodemanager.c cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
cluster 623 fs/ocfs2/cluster/nodemanager.c write_lock(&cluster->cl_nodes_lock);
cluster 627 fs/ocfs2/cluster/nodemanager.c rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);
cluster 630 fs/ocfs2/cluster/nodemanager.c if (cluster->cl_nodes[node->nd_num] == node) {
cluster 631 fs/ocfs2/cluster/nodemanager.c cluster->cl_nodes[node->nd_num] = NULL;
cluster 632 fs/ocfs2/cluster/nodemanager.c clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
cluster 634 fs/ocfs2/cluster/nodemanager.c write_unlock(&cluster->cl_nodes_lock);
cluster 656 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster = to_o2nm_cluster(item);
cluster 658 fs/ocfs2/cluster/nodemanager.c kfree(cluster);
cluster 690 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster = NULL;
cluster 699 fs/ocfs2/cluster/nodemanager.c cluster = kzalloc(sizeof(struct o2nm_cluster), GFP_KERNEL);
cluster 702 fs/ocfs2/cluster/nodemanager.c if (cluster == NULL || ns == NULL || o2hb_group == NULL)
cluster 705 fs/ocfs2/cluster/nodemanager.c config_group_init_type_name(&cluster->cl_group, name,
cluster 707 fs/ocfs2/cluster/nodemanager.c configfs_add_default_group(&ns->ns_group, &cluster->cl_group);
cluster 711 fs/ocfs2/cluster/nodemanager.c configfs_add_default_group(o2hb_group, &cluster->cl_group);
cluster 713 fs/ocfs2/cluster/nodemanager.c rwlock_init(&cluster->cl_nodes_lock);
cluster 714 fs/ocfs2/cluster/nodemanager.c cluster->cl_node_ip_tree = RB_ROOT;
cluster 715 fs/ocfs2/cluster/nodemanager.c cluster->cl_reconnect_delay_ms = O2NET_RECONNECT_DELAY_MS_DEFAULT;
cluster 716 fs/ocfs2/cluster/nodemanager.c cluster->cl_idle_timeout_ms = O2NET_IDLE_TIMEOUT_MS_DEFAULT;
cluster 717 fs/ocfs2/cluster/nodemanager.c cluster->cl_keepalive_delay_ms = O2NET_KEEPALIVE_DELAY_MS_DEFAULT;
cluster 718 fs/ocfs2/cluster/nodemanager.c cluster->cl_fence_method = O2NM_FENCE_RESET;
cluster 720 fs/ocfs2/cluster/nodemanager.c ret = &cluster->cl_group;
cluster 721 fs/ocfs2/cluster/nodemanager.c o2nm_single_cluster = cluster;
cluster 725 fs/ocfs2/cluster/nodemanager.c kfree(cluster);
cluster 736 fs/ocfs2/cluster/nodemanager.c struct o2nm_cluster *cluster = to_o2nm_cluster(item);
cluster 738 fs/ocfs2/cluster/nodemanager.c BUG_ON(o2nm_single_cluster != cluster);
cluster 741 fs/ocfs2/cluster/nodemanager.c configfs_remove_default_groups(&cluster->cl_group);
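
The nodemanager.c hits circle one invariant: every mutation of the cluster's node table (cl_nodes[], the IP rb-tree, cl_nodes_bitmap) takes cl_nodes_lock exclusively, while readers such as o2nm_configured_node_map() take it shared just long enough to snapshot the bitmap. A pthread rendering of both sides (field names follow the listing; everything else is a stand-in):

    #include <stdio.h>
    #include <string.h>
    #include <pthread.h>

    #define O2NM_MAX_NODES 255 /* matches the o2nm node limit */

    struct o2nm_cluster {
        pthread_rwlock_t cl_nodes_lock;
        unsigned char cl_nodes_bitmap[(O2NM_MAX_NODES + 7) / 8];
    };

    /* Reader: copy the configured-node bitmap out under the shared lock,
     * the way o2nm_configured_node_map() does. */
    static void configured_node_map(struct o2nm_cluster *c, unsigned char *map)
    {
        pthread_rwlock_rdlock(&c->cl_nodes_lock);
        memcpy(map, c->cl_nodes_bitmap, sizeof(c->cl_nodes_bitmap));
        pthread_rwlock_unlock(&c->cl_nodes_lock);
    }

    /* Writer: flip one node's bit under the exclusive lock. */
    static void set_node(struct o2nm_cluster *c, int num)
    {
        pthread_rwlock_wrlock(&c->cl_nodes_lock);
        c->cl_nodes_bitmap[num / 8] |= 1u << (num % 8);
        pthread_rwlock_unlock(&c->cl_nodes_lock);
    }

    int main(void)
    {
        struct o2nm_cluster c = { PTHREAD_RWLOCK_INITIALIZER, { 0 } };
        unsigned char map[sizeof(c.cl_nodes_bitmap)];

        set_node(&c, 3);
        configured_node_map(&c, map);
        printf("node 3 configured: %d\n", !!(map[0] & (1 << 3)));
        return 0;
    }
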
cluster 31 fs/ocfs2/extent_map.h void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cluster);
cluster 442 fs/ocfs2/ioctl.c unsigned int offset = 0, cluster, chunk;
cluster 491 fs/ocfs2/ioctl.c for (cluster = 0; cluster < num_clusters; cluster++) {
cluster 48 fs/ocfs2/resize.c u32 cluster, lgd_cluster;
cluster 53 fs/ocfs2/resize.c cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
cluster 55 fs/ocfs2/resize.c gd_blkno = ocfs2_which_cluster_group(inode, cluster);
cluster 64 fs/ocfs2/resize.c if (lgd_cluster >= cluster)
cluster 68 fs/ocfs2/resize.c ocfs2_set_bit(cluster % cl_cpg,
cluster 71 fs/ocfs2/resize.c ocfs2_clear_bit(cluster % cl_cpg,
cluster 175 fs/ocfs2/resize.c u32 cluster;
cluster 184 fs/ocfs2/resize.c cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
cluster 185 fs/ocfs2/resize.c if (cluster >= clusters)
cluster 417 fs/ocfs2/resize.c u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group);
cluster 421 fs/ocfs2/resize.c if (cluster < total_clusters)
cluster 437 fs/ocfs2/resize.c else if (input->group != ocfs2_which_cluster_group(inode, cluster))
cluster 2241 fs/ocfs2/suballoc.c u32 cluster = 0;
cluster 2246 fs/ocfs2/suballoc.c cluster = ocfs2_blocks_to_clusters(inode->i_sb, bg_blkno);
cluster 2247 fs/ocfs2/suballoc.c cluster += (u32) bg_bit_off;
cluster 2248 fs/ocfs2/suballoc.c return cluster;
cluster 2253 fs/ocfs2/suballoc.c u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster)
cluster 2260 fs/ocfs2/suballoc.c group_no = cluster / osb->bitmap_cpg;
cluster 179 fs/ocfs2/suballoc.h u64 ocfs2_which_cluster_group(struct inode *inode, u32 cluster);
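
The ocfs2 resize.c and suballoc.c hits all perform the same arithmetic: with cpg clusters per group, a global cluster number splits into group_no = cluster / cpg plus an in-group bit cluster % cpg, and the reverse mapping adds the bit offset back onto the group's start. Condensed (the cpg value is illustrative):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int cpg = 32256;  /* illustrative clusters-per-group */
        unsigned int cluster = 100000;

        /* ocfs2_which_cluster_group()-style split */
        unsigned int group_no = cluster / cpg;
        unsigned int bit_off = cluster % cpg;

        /* recombination, as in the suballoc.c cluster += bg_bit_off step */
        unsigned int back = group_no * cpg + bit_off;

        printf("cluster %u -> group %u, bit %u -> cluster %u\n",
               cluster, group_no, bit_off, back);
        return 0;
    }
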
cluster 83 include/linux/dlm.h int dlm_new_lockspace(const char *name, const char *cluster,
cluster 215 include/media/v4l2-ctrls.h struct v4l2_ctrl **cluster;
cluster 71 include/soc/nps/common.h u32 __reserved:20, cluster:4, core:4, thread:4;
cluster 73 include/soc/nps/common.h u32 __reserved:24, cluster:4, core:4;
cluster 100 include/soc/nps/common.h static inline int nps_cluster_logic_to_phys(int cluster)
cluster 108 include/soc/nps/common.h : "+r"(cluster)
cluster 114 include/soc/nps/common.h return cluster;
cluster 119 include/soc/nps/common.h nps_cluster_logic_to_phys(gid.cluster); })
cluster 1285 include/trace/events/btrfs.h const struct btrfs_free_cluster *cluster,
cluster 1288 include/trace/events/btrfs.h TP_ARGS(block_group, cluster, size, bitmap),
cluster 1302 include/trace/events/btrfs.h __entry->start = cluster->window_start;
cluster 1303 include/trace/events/btrfs.h __entry->max_size = cluster->max_size;
cluster 268 include/uapi/linux/tipc.h unsigned int cluster,
cluster 272 include/uapi/linux/tipc.h (cluster << TIPC_CLUSTER_OFFSET) |
cluster 602 mm/swapfile.c struct percpu_cluster *cluster;
cluster 608 mm/swapfile.c cluster = this_cpu_ptr(si->percpu_cluster);
cluster 609 mm/swapfile.c if (cluster_is_null(&cluster->index)) {
cluster 611 mm/swapfile.c cluster->index = si->free_clusters.head;
cluster 612 mm/swapfile.c cluster->next = cluster_next(&cluster->index) *
cluster 632 mm/swapfile.c tmp = cluster->next;
cluster 634 mm/swapfile.c (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
cluster 636 mm/swapfile.c cluster_set_null(&cluster->index);
cluster 649 mm/swapfile.c cluster_set_null(&cluster->index);
cluster 652 mm/swapfile.c cluster->next = tmp + 1;
cluster 3220 mm/swapfile.c struct percpu_cluster *cluster;
cluster 3221 mm/swapfile.c cluster = per_cpu_ptr(p->percpu_cluster, cpu);
cluster 3222 mm/swapfile.c cluster_set_null(&cluster->index);
cluster 325 sound/usb/stream.c *cluster)
cluster 327 sound/usb/stream.c unsigned int channels = cluster->bNrChannels;
cluster 329 sound/usb/stream.c void *p = cluster;
cluster 339 sound/usb/stream.c len = le16_to_cpu(cluster->wLength);
cluster 343 sound/usb/stream.c while (((p - (void *)cluster) < len) && (c < channels)) {
cluster 863 sound/usb/stream.c struct uac3_cluster_header_descriptor *cluster;
cluster 978 sound/usb/stream.c cluster = kzalloc(wLength, GFP_KERNEL);
cluster 979 sound/usb/stream.c if (!cluster)
cluster 987 sound/usb/stream.c cluster, wLength);
cluster 989 sound/usb/stream.c kfree(cluster);
cluster 995 sound/usb/stream.c kfree(cluster);
cluster 999 sound/usb/stream.c num_channels = cluster->bNrChannels;
cluster 1000 sound/usb/stream.c chmap = convert_chmap_v3(cluster);
cluster 1001 sound/usb/stream.c kfree(cluster);
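
include/uapi/linux/tipc.h's tipc_addr(), partially visible above, packs the three-level TIPC address <zone.cluster.node> into one 32-bit word: 8 bits of zone above 12 bits of cluster above 12 bits of node. Reproduced with the standard offsets:

    #include <stdio.h>
    #include <stdint.h>

    #define TIPC_ZONE_OFFSET    24
    #define TIPC_CLUSTER_OFFSET 12

    /* Same packing as the uapi helper: <zone.cluster.node> -> 32-bit word. */
    static uint32_t tipc_addr(unsigned int zone, unsigned int cluster,
                              unsigned int node)
    {
        return (zone << TIPC_ZONE_OFFSET) |
               (cluster << TIPC_CLUSTER_OFFSET) |
               node;
    }

    int main(void)
    {
        uint32_t a = tipc_addr(1, 1, 10);

        printf("<1.1.10> = 0x%08x\n", a);  /* 0x0100100a */
        printf("cluster  = %u\n", (a >> TIPC_CLUSTER_OFFSET) & 0xfff);
        return 0;
    }
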