cpu_base 327 arch/mips/ar7/clock.c int cpu_base, cpu_mul, cpu_prediv, cpu_postdiv;
cpu_base 331 arch/mips/ar7/clock.c cpu_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_CPU, bootcr);
cpu_base 347 arch/mips/ar7/clock.c calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
cpu_base 350 arch/mips/ar7/clock.c ((cpu_base / cpu_prediv) * cpu_mul) / cpu_postdiv;
cpu_base 351 arch/mips/ar7/clock.c tnetd7200_set_clock(cpu_base, &clocks->cpu,
cpu_base 360 arch/mips/ar7/clock.c calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
cpu_base 362 arch/mips/ar7/clock.c cpu_clk.rate = ((cpu_base / cpu_prediv) * cpu_mul)
cpu_base 364 arch/mips/ar7/clock.c tnetd7200_set_clock(cpu_base, &clocks->cpu,
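The clock.c entries above use cpu_base as a PLL reference rate in plain integer arithmetic: lines 350 and 362 both compute ((cpu_base / cpu_prediv) * cpu_mul) / cpu_postdiv. A minimal stand-alone restatement of that formula; the helper name and the divider values are invented for the example, only the formula comes from the listing:

/* Illustrative sketch only: mirrors the rate arithmetic at
 * clock.c lines 350/362 above; pll_rate() is not a kernel API. */
#include <stdio.h>

static unsigned long pll_rate(unsigned long base, unsigned int prediv,
			      unsigned int mul, unsigned int postdiv)
{
	/* rate = ((base / prediv) * mul) / postdiv */
	return ((base / prediv) * mul) / postdiv;
}

int main(void)
{
	/* e.g. a 25 MHz reference, prediv 1, mul 8, postdiv 1 -> 200 MHz */
	printf("%lu\n", pll_rate(25000000UL, 1, 8, 1));
	return 0;
}

Dividing before multiplying keeps the intermediate value from overflowing unsigned long on 32-bit targets, at the cost of truncation in the prediv step.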
cpu_base 1383 drivers/infiniband/hw/hns/hns_roce_hem.c void *cpu_base;
cpu_base 1419 drivers/infiniband/hw/hns/hns_roce_hem.c cpu_base = root_hem->addr + total * BA_BYTE_LEN;
cpu_base 1433 drivers/infiniband/hw/hns/hns_roce_hem.c hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
cpu_base 1447 drivers/infiniband/hw/hns/hns_roce_hem.c hem_list_link_bt(hr_dev, cpu_base + offset,
cpu_base 1555 drivers/infiniband/hw/hns/hns_roce_hem.c void *cpu_base = NULL;
cpu_base 1562 drivers/infiniband/hw/hns/hns_roce_hem.c cpu_base = hem->addr + nr * BA_BYTE_LEN;
cpu_base 1575 drivers/infiniband/hw/hns/hns_roce_hem.c return cpu_base;

cpu_base 71 drivers/irqchip/irq-gic.c union gic_base cpu_base;
cpu_base 145 drivers/irqchip/irq-gic.c return data->get_base(&data->cpu_base);
cpu_base 155 drivers/irqchip/irq-gic.c #define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
cpu_base 362 drivers/irqchip/irq-gic.c void __iomem *cpu_base = gic_data_cpu_base(gic);
cpu_base 365 drivers/irqchip/irq-gic.c irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
cpu_base 370 drivers/irqchip/irq-gic.c writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
cpu_base 376 drivers/irqchip/irq-gic.c writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
cpu_base 378 drivers/irqchip/irq-gic.c writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
cpu_base 469 drivers/irqchip/irq-gic.c void __iomem *cpu_base = gic_data_cpu_base(gic);
cpu_base 477 drivers/irqchip/irq-gic.c if (gic_check_gicv2(cpu_base))
cpu_base 479 drivers/irqchip/irq-gic.c writel_relaxed(0, cpu_base + GIC_CPU_ACTIVEPRIO + i * 4);
cpu_base 484 drivers/irqchip/irq-gic.c bypass = readl(cpu_base + GIC_CPU_CTRL);
cpu_base 487 drivers/irqchip/irq-gic.c writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
cpu_base 556 drivers/irqchip/irq-gic.c void __iomem *cpu_base;
cpu_base 562 drivers/irqchip/irq-gic.c cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
cpu_base 563 drivers/irqchip/irq-gic.c val = readl(cpu_base + GIC_CPU_CTRL);
cpu_base 565 drivers/irqchip/irq-gic.c writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
cpu_base 667 drivers/irqchip/irq-gic.c void __iomem *cpu_base;
cpu_base 673 drivers/irqchip/irq-gic.c cpu_base = gic_data_cpu_base(gic);
cpu_base 675 drivers/irqchip/irq-gic.c if (!dist_base || !cpu_base)
cpu_base 697 drivers/irqchip/irq-gic.c void __iomem *cpu_base;
cpu_base 703 drivers/irqchip/irq-gic.c cpu_base = gic_data_cpu_base(gic);
cpu_base 705 drivers/irqchip/irq-gic.c if (!dist_base || !cpu_base)
cpu_base 730 drivers/irqchip/irq-gic.c writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
cpu_base 1107 drivers/irqchip/irq-gic.c gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
cpu_base 1109 drivers/irqchip/irq-gic.c !gic->cpu_base.percpu_base)) {
cpu_base 1120 drivers/irqchip/irq-gic.c *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
cpu_base 1131 drivers/irqchip/irq-gic.c gic->cpu_base.common_base = gic->raw_cpu_base;
cpu_base 1188 drivers/irqchip/irq-gic.c free_percpu(gic->cpu_base.percpu_base);
cpu_base 1237 drivers/irqchip/irq-gic.c void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
cpu_base 1249 drivers/irqchip/irq-gic.c gic->raw_cpu_base = cpu_base;

cpu_base 54 drivers/irqchip/irq-hip04.c void __iomem *cpu_base;
cpu_base 80 drivers/irqchip/irq-hip04.c return hip04_data->cpu_base;
cpu_base 179 drivers/irqchip/irq-hip04.c void __iomem *cpu_base = hip04_data.cpu_base;
cpu_base 182 drivers/irqchip/irq-hip04.c irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
cpu_base 190 drivers/irqchip/irq-hip04.c writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
cpu_base 257 drivers/irqchip/irq-hip04.c void __iomem *base = intc->cpu_base;
cpu_base 373 drivers/irqchip/irq-hip04.c hip04_data.cpu_base = of_iomap(node, 1);
cpu_base 374 drivers/irqchip/irq-hip04.c WARN(!hip04_data.cpu_base, "unable to map hip04 intc cpu registers\n");
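In irq-gic.c and irq-hip04.c, cpu_base is an __iomem cookie for the GIC CPU interface, and every access is a readl/writel at a fixed offset from it: reading GIC_CPU_INTACK acknowledges the highest-priority pending interrupt, and writing the same value back to GIC_CPU_EOI signals end of interrupt. A sketch of that ack/EOI flow, assuming the offsets from the kernel's arm-gic.h and with the dispatch step stubbed out (gic_poll_one() is not a kernel function):

/* Sketch of the ack/EOI sequence the irq-gic.c hits above implement. */
#include <linux/io.h>
#include <linux/types.h>

#define GIC_CPU_INTACK	0x0c	/* read: acknowledge highest pending IRQ */
#define GIC_CPU_EOI	0x10	/* write: signal end of interrupt */

static void gic_poll_one(void __iomem *cpu_base)
{
	u32 irqstat, irqnr;

	irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
	irqnr = irqstat & 0x3ff;	/* low 10 bits carry the IRQ ID */
	if (irqnr == 1023)		/* 1023 means nothing is pending */
		return;
	/* ... dispatch irqnr to its handler here ... */
	writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
}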
"RX" : "TX"); cpu_base 3706 drivers/net/ethernet/broadcom/tg3.c static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, cpu_base 3714 drivers/net/ethernet/broadcom/tg3.c if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { cpu_base 3731 drivers/net/ethernet/broadcom/tg3.c err = tg3_halt_cpu(tp, cpu_base); cpu_base 3739 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_STATE, 0xffffffff); cpu_base 3740 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_MODE, cpu_base 3741 drivers/net/ethernet/broadcom/tg3.c tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT); cpu_base 3772 drivers/net/ethernet/broadcom/tg3.c static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc) cpu_base 3777 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_STATE, 0xffffffff); cpu_base 3778 drivers/net/ethernet/broadcom/tg3.c tw32_f(cpu_base + CPU_PC, pc); cpu_base 3781 drivers/net/ethernet/broadcom/tg3.c if (tr32(cpu_base + CPU_PC) == pc) cpu_base 3783 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_STATE, 0xffffffff); cpu_base 3784 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); cpu_base 3785 drivers/net/ethernet/broadcom/tg3.c tw32_f(cpu_base + CPU_PC, pc); cpu_base 3910 drivers/net/ethernet/broadcom/tg3.c unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; cpu_base 3927 drivers/net/ethernet/broadcom/tg3.c cpu_base = RX_CPU_BASE; cpu_base 3930 drivers/net/ethernet/broadcom/tg3.c cpu_base = TX_CPU_BASE; cpu_base 3935 drivers/net/ethernet/broadcom/tg3.c err = tg3_load_firmware_cpu(tp, cpu_base, cpu_base 3942 drivers/net/ethernet/broadcom/tg3.c err = tg3_pause_cpu_and_set_pc(tp, cpu_base, cpu_base 3947 drivers/net/ethernet/broadcom/tg3.c __func__, tr32(cpu_base + CPU_PC), cpu_base 3952 drivers/net/ethernet/broadcom/tg3.c tg3_resume_cpu(tp, cpu_base); cpu_base 159 include/linux/hrtimer.h struct hrtimer_cpu_base *cpu_base; cpu_base 312 include/linux/hrtimer.h timer->base->cpu_base->hres_active : 0; cpu_base 138 kernel/time/hrtimer.c .clock_base = { { .cpu_base = &migration_cpu_base, }, }, cpu_base 169 kernel/time/hrtimer.c raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); cpu_base 173 kernel/time/hrtimer.c raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); cpu_base 194 kernel/time/hrtimer.c return expires < new_base->cpu_base->expires_next; cpu_base 248 kernel/time/hrtimer.c raw_spin_unlock(&base->cpu_base->lock); cpu_base 249 kernel/time/hrtimer.c raw_spin_lock(&new_base->cpu_base->lock); cpu_base 253 kernel/time/hrtimer.c raw_spin_unlock(&new_base->cpu_base->lock); cpu_base 254 kernel/time/hrtimer.c raw_spin_lock(&base->cpu_base->lock); cpu_base 282 kernel/time/hrtimer.c raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); cpu_base 487 kernel/time/hrtimer.c __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active) cpu_base 497 kernel/time/hrtimer.c return &cpu_base->clock_base[idx]; cpu_base 500 kernel/time/hrtimer.c #define for_each_active_base(base, cpu_base, active) \ cpu_base 501 kernel/time/hrtimer.c while ((base = __next_base((cpu_base), &(active)))) cpu_base 503 kernel/time/hrtimer.c static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base, cpu_base 511 kernel/time/hrtimer.c for_each_active_base(base, cpu_base, active) { cpu_base 534 kernel/time/hrtimer.c cpu_base->softirq_next_timer = timer; cpu_base 536 kernel/time/hrtimer.c cpu_base->next_timer = timer; cpu_base 567 kernel/time/hrtimer.c __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask) cpu_base 573 
cpu_base 3592 drivers/net/ethernet/broadcom/tg3.c static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
cpu_base 3598 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_STATE, 0xffffffff);
cpu_base 3599 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
cpu_base 3600 drivers/net/ethernet/broadcom/tg3.c if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
cpu_base 3628 drivers/net/ethernet/broadcom/tg3.c static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
cpu_base 3630 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_STATE, 0xffffffff);
cpu_base 3631 drivers/net/ethernet/broadcom/tg3.c tw32_f(cpu_base + CPU_MODE, 0x00000000);
cpu_base 3641 drivers/net/ethernet/broadcom/tg3.c static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
cpu_base 3645 drivers/net/ethernet/broadcom/tg3.c BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
cpu_base 3653 drivers/net/ethernet/broadcom/tg3.c if (cpu_base == RX_CPU_BASE) {
cpu_base 3668 drivers/net/ethernet/broadcom/tg3.c __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
cpu_base 3706 drivers/net/ethernet/broadcom/tg3.c static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
cpu_base 3714 drivers/net/ethernet/broadcom/tg3.c if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
cpu_base 3731 drivers/net/ethernet/broadcom/tg3.c err = tg3_halt_cpu(tp, cpu_base);
cpu_base 3739 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_STATE, 0xffffffff);
cpu_base 3740 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_MODE,
cpu_base 3741 drivers/net/ethernet/broadcom/tg3.c tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
cpu_base 3772 drivers/net/ethernet/broadcom/tg3.c static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
cpu_base 3777 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_STATE, 0xffffffff);
cpu_base 3778 drivers/net/ethernet/broadcom/tg3.c tw32_f(cpu_base + CPU_PC, pc);
cpu_base 3781 drivers/net/ethernet/broadcom/tg3.c if (tr32(cpu_base + CPU_PC) == pc)
cpu_base 3783 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_STATE, 0xffffffff);
cpu_base 3784 drivers/net/ethernet/broadcom/tg3.c tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
cpu_base 3785 drivers/net/ethernet/broadcom/tg3.c tw32_f(cpu_base + CPU_PC, pc);
cpu_base 3910 drivers/net/ethernet/broadcom/tg3.c unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
cpu_base 3927 drivers/net/ethernet/broadcom/tg3.c cpu_base = RX_CPU_BASE;
cpu_base 3930 drivers/net/ethernet/broadcom/tg3.c cpu_base = TX_CPU_BASE;
cpu_base 3935 drivers/net/ethernet/broadcom/tg3.c err = tg3_load_firmware_cpu(tp, cpu_base,
cpu_base 3942 drivers/net/ethernet/broadcom/tg3.c err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
cpu_base 3947 drivers/net/ethernet/broadcom/tg3.c __func__, tr32(cpu_base + CPU_PC),
cpu_base 3952 drivers/net/ethernet/broadcom/tg3.c tg3_resume_cpu(tp, cpu_base);
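tg3.c is the one driver here where cpu_base is not a pointer at all: it is a u32 offset (RX_CPU_BASE or TX_CPU_BASE) selecting the register bank of one of the NIC's embedded CPUs, and the driver's tw32()/tr32() accessors add it to per-CPU register offsets such as CPU_STATE and CPU_MODE. A sketch of the same base-plus-offset banking; the offsets, the halt bit, and reg32_write() are placeholders, not tg3's real constants or accessors:

/* Sketch of register banking via a u32 cpu_base offset. */
#include <linux/io.h>
#include <linux/types.h>

#define CPU_MODE	0x000	/* placeholder offsets within one bank */
#define CPU_STATE	0x004
#define CPU_MODE_HALT	0x400	/* placeholder halt bit */

static void reg32_write(void __iomem *regs, u32 off, u32 val)
{
	writel(val, regs + off);
}

static void cpu_halt(void __iomem *regs, u32 cpu_base)
{
	/* same registers, different embedded CPU, selected by the base */
	reg32_write(regs, cpu_base + CPU_STATE, 0xffffffff);
	reg32_write(regs, cpu_base + CPU_MODE, CPU_MODE_HALT);
}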
cpu_base 159 include/linux/hrtimer.h struct hrtimer_cpu_base *cpu_base;
cpu_base 312 include/linux/hrtimer.h timer->base->cpu_base->hres_active : 0;

cpu_base 138 kernel/time/hrtimer.c .clock_base = { { .cpu_base = &migration_cpu_base, }, },
cpu_base 169 kernel/time/hrtimer.c raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
cpu_base 173 kernel/time/hrtimer.c raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
cpu_base 194 kernel/time/hrtimer.c return expires < new_base->cpu_base->expires_next;
cpu_base 248 kernel/time/hrtimer.c raw_spin_unlock(&base->cpu_base->lock);
cpu_base 249 kernel/time/hrtimer.c raw_spin_lock(&new_base->cpu_base->lock);
cpu_base 253 kernel/time/hrtimer.c raw_spin_unlock(&new_base->cpu_base->lock);
cpu_base 254 kernel/time/hrtimer.c raw_spin_lock(&base->cpu_base->lock);
cpu_base 282 kernel/time/hrtimer.c raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
cpu_base 487 kernel/time/hrtimer.c __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
cpu_base 497 kernel/time/hrtimer.c return &cpu_base->clock_base[idx];
cpu_base 500 kernel/time/hrtimer.c #define for_each_active_base(base, cpu_base, active) \
cpu_base 501 kernel/time/hrtimer.c while ((base = __next_base((cpu_base), &(active))))
cpu_base 503 kernel/time/hrtimer.c static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
cpu_base 511 kernel/time/hrtimer.c for_each_active_base(base, cpu_base, active) {
cpu_base 534 kernel/time/hrtimer.c cpu_base->softirq_next_timer = timer;
cpu_base 536 kernel/time/hrtimer.c cpu_base->next_timer = timer;
cpu_base 567 kernel/time/hrtimer.c __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
cpu_base 573 kernel/time/hrtimer.c if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
cpu_base 574 kernel/time/hrtimer.c active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
cpu_base 575 kernel/time/hrtimer.c cpu_base->softirq_next_timer = NULL;
cpu_base 576 kernel/time/hrtimer.c expires_next = __hrtimer_next_event_base(cpu_base, NULL,
cpu_base 579 kernel/time/hrtimer.c next_timer = cpu_base->softirq_next_timer;
cpu_base 583 kernel/time/hrtimer.c active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
cpu_base 584 kernel/time/hrtimer.c cpu_base->next_timer = next_timer;
cpu_base 585 kernel/time/hrtimer.c expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
cpu_base 611 kernel/time/hrtimer.c static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
cpu_base 614 kernel/time/hrtimer.c cpu_base->hres_active : 0;
cpu_base 628 kernel/time/hrtimer.c hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
cpu_base 635 kernel/time/hrtimer.c expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
cpu_base 637 kernel/time/hrtimer.c if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
cpu_base 643 kernel/time/hrtimer.c if (cpu_base->softirq_activated)
cpu_base 644 kernel/time/hrtimer.c expires_next = __hrtimer_get_next_event(cpu_base,
cpu_base 647 kernel/time/hrtimer.c cpu_base->softirq_expires_next = expires_next;
cpu_base 650 kernel/time/hrtimer.c if (skip_equal && expires_next == cpu_base->expires_next)
cpu_base 653 kernel/time/hrtimer.c cpu_base->expires_next = expires_next;
cpu_base 672 kernel/time/hrtimer.c if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
cpu_base 675 kernel/time/hrtimer.c tick_program_event(cpu_base->expires_next, 1);
cpu_base 777 kernel/time/hrtimer.c struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
cpu_base 798 kernel/time/hrtimer.c struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
cpu_base 818 kernel/time/hrtimer.c if (base->cpu_base != cpu_base)
cpu_base 828 kernel/time/hrtimer.c if (cpu_base->in_hrtirq)
cpu_base 831 kernel/time/hrtimer.c if (expires >= cpu_base->expires_next)
cpu_base 835 kernel/time/hrtimer.c cpu_base->next_timer = timer;
cpu_base 836 kernel/time/hrtimer.c cpu_base->expires_next = expires;
cpu_base 847 kernel/time/hrtimer.c if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
cpu_base 898 kernel/time/hrtimer.c raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
cpu_base 967 kernel/time/hrtimer.c base->cpu_base->active_bases |= 1 << base->index;
cpu_base 989 kernel/time/hrtimer.c struct hrtimer_cpu_base *cpu_base = base->cpu_base;
cpu_base 998 kernel/time/hrtimer.c cpu_base->active_bases &= ~(1 << base->index);
cpu_base 1008 kernel/time/hrtimer.c if (reprogram && timer == cpu_base->next_timer)
cpu_base 1009 kernel/time/hrtimer.c hrtimer_force_reprogram(cpu_base, 1);
cpu_base 1032 kernel/time/hrtimer.c reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
cpu_base 1060 kernel/time/hrtimer.c hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
cpu_base 1067 kernel/time/hrtimer.c expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
cpu_base 1081 kernel/time/hrtimer.c hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
cpu_base 1201 kernel/time/hrtimer.c static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
cpu_base 1204 kernel/time/hrtimer.c if (atomic_read(&cpu_base->timer_waiters)) {
cpu_base 1205 kernel/time/hrtimer.c raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
cpu_base 1206 kernel/time/hrtimer.c spin_unlock(&cpu_base->softirq_expiry_lock);
cpu_base 1207 kernel/time/hrtimer.c spin_lock(&cpu_base->softirq_expiry_lock);
cpu_base 1208 kernel/time/hrtimer.c raw_spin_lock_irq(&cpu_base->lock);
cpu_base 1249 kernel/time/hrtimer.c atomic_inc(&base->cpu_base->timer_waiters);
cpu_base 1250 kernel/time/hrtimer.c spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
cpu_base 1251 kernel/time/hrtimer.c atomic_dec(&base->cpu_base->timer_waiters);
cpu_base 1252 kernel/time/hrtimer.c spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
cpu_base 1316 kernel/time/hrtimer.c struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
cpu_base 1320 kernel/time/hrtimer.c raw_spin_lock_irqsave(&cpu_base->lock, flags);
cpu_base 1322 kernel/time/hrtimer.c if (!__hrtimer_hres_active(cpu_base))
cpu_base 1323 kernel/time/hrtimer.c expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
cpu_base 1325 kernel/time/hrtimer.c raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
cpu_base 1339 kernel/time/hrtimer.c struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
cpu_base 1343 kernel/time/hrtimer.c raw_spin_lock_irqsave(&cpu_base->lock, flags);
cpu_base 1345 kernel/time/hrtimer.c if (__hrtimer_hres_active(cpu_base)) {
cpu_base 1348 kernel/time/hrtimer.c if (!cpu_base->softirq_activated) {
cpu_base 1349 kernel/time/hrtimer.c active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
cpu_base 1350 kernel/time/hrtimer.c expires = __hrtimer_next_event_base(cpu_base, exclude,
cpu_base 1353 kernel/time/hrtimer.c active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
cpu_base 1354 kernel/time/hrtimer.c expires = __hrtimer_next_event_base(cpu_base, exclude, active,
cpu_base 1358 kernel/time/hrtimer.c raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
cpu_base 1380 kernel/time/hrtimer.c struct hrtimer_cpu_base *cpu_base;
cpu_base 1394 kernel/time/hrtimer.c cpu_base = raw_cpu_ptr(&hrtimer_bases);
cpu_base 1408 kernel/time/hrtimer.c timer->base = &cpu_base->clock_base[base];
cpu_base 1477 kernel/time/hrtimer.c static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
cpu_base 1485 kernel/time/hrtimer.c lockdep_assert_held(&cpu_base->lock);
cpu_base 1515 kernel/time/hrtimer.c raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
cpu_base 1519 kernel/time/hrtimer.c raw_spin_lock_irq(&cpu_base->lock);
cpu_base 1547 kernel/time/hrtimer.c static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
cpu_base 1551 kernel/time/hrtimer.c unsigned int active = cpu_base->active_bases & active_mask;
cpu_base 1553 kernel/time/hrtimer.c for_each_active_base(base, cpu_base, active) {
cpu_base 1579 kernel/time/hrtimer.c __run_hrtimer(cpu_base, base, timer, &basenow, flags);
cpu_base 1581 kernel/time/hrtimer.c hrtimer_sync_wait_running(cpu_base, flags);
cpu_base 1588 kernel/time/hrtimer.c struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
cpu_base 1592 kernel/time/hrtimer.c hrtimer_cpu_base_lock_expiry(cpu_base);
cpu_base 1593 kernel/time/hrtimer.c raw_spin_lock_irqsave(&cpu_base->lock, flags);
cpu_base 1595 kernel/time/hrtimer.c now = hrtimer_update_base(cpu_base);
cpu_base 1596 kernel/time/hrtimer.c __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
cpu_base 1598 kernel/time/hrtimer.c cpu_base->softirq_activated = 0;
cpu_base 1599 kernel/time/hrtimer.c hrtimer_update_softirq_timer(cpu_base, true);
cpu_base 1601 kernel/time/hrtimer.c raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
cpu_base 1602 kernel/time/hrtimer.c hrtimer_cpu_base_unlock_expiry(cpu_base);
cpu_base 1613 kernel/time/hrtimer.c struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
cpu_base 1618 kernel/time/hrtimer.c BUG_ON(!cpu_base->hres_active);
cpu_base 1619 kernel/time/hrtimer.c cpu_base->nr_events++;
cpu_base 1622 kernel/time/hrtimer.c raw_spin_lock_irqsave(&cpu_base->lock, flags);
cpu_base 1623 kernel/time/hrtimer.c entry_time = now = hrtimer_update_base(cpu_base);
cpu_base 1625 kernel/time/hrtimer.c cpu_base->in_hrtirq = 1;
cpu_base 1633 kernel/time/hrtimer.c cpu_base->expires_next = KTIME_MAX;
cpu_base 1635 kernel/time/hrtimer.c if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base 1636 kernel/time/hrtimer.c cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base 1637 kernel/time/hrtimer.c cpu_base->softirq_activated = 1;
cpu_base 1641 kernel/time/hrtimer.c __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
cpu_base 1644 kernel/time/hrtimer.c expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
cpu_base 1649 kernel/time/hrtimer.c cpu_base->expires_next = expires_next;
cpu_base 1650 kernel/time/hrtimer.c cpu_base->in_hrtirq = 0;
cpu_base 1651 kernel/time/hrtimer.c raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
cpu_base 1655 kernel/time/hrtimer.c cpu_base->hang_detected = 0;
cpu_base 1672 kernel/time/hrtimer.c raw_spin_lock_irqsave(&cpu_base->lock, flags);
cpu_base 1673 kernel/time/hrtimer.c now = hrtimer_update_base(cpu_base);
cpu_base 1674 kernel/time/hrtimer.c cpu_base->nr_retries++;
cpu_base 1683 kernel/time/hrtimer.c cpu_base->nr_hangs++;
cpu_base 1684 kernel/time/hrtimer.c cpu_base->hang_detected = 1;
cpu_base 1685 kernel/time/hrtimer.c raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
cpu_base 1688 kernel/time/hrtimer.c if ((unsigned int)delta > cpu_base->max_hang_time)
cpu_base 1689 kernel/time/hrtimer.c cpu_base->max_hang_time = (unsigned int) delta;
cpu_base 1726 kernel/time/hrtimer.c struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
cpu_base 1730 kernel/time/hrtimer.c if (__hrtimer_hres_active(cpu_base))
cpu_base 1745 kernel/time/hrtimer.c raw_spin_lock_irqsave(&cpu_base->lock, flags);
cpu_base 1746 kernel/time/hrtimer.c now = hrtimer_update_base(cpu_base);
cpu_base 1748 kernel/time/hrtimer.c if (!ktime_before(now, cpu_base->softirq_expires_next)) {
cpu_base 1749 kernel/time/hrtimer.c cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base 1750 kernel/time/hrtimer.c cpu_base->softirq_activated = 1;
cpu_base 1754 kernel/time/hrtimer.c __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
cpu_base 1755 kernel/time/hrtimer.c raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
cpu_base 1990 kernel/time/hrtimer.c struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
cpu_base 1994 kernel/time/hrtimer.c cpu_base->clock_base[i].cpu_base = cpu_base;
cpu_base 1995 kernel/time/hrtimer.c timerqueue_init_head(&cpu_base->clock_base[i].active);
cpu_base 1998 kernel/time/hrtimer.c cpu_base->cpu = cpu;
cpu_base 1999 kernel/time/hrtimer.c cpu_base->active_bases = 0;
cpu_base 2000 kernel/time/hrtimer.c cpu_base->hres_active = 0;
cpu_base 2001 kernel/time/hrtimer.c cpu_base->hang_detected = 0;
cpu_base 2002 kernel/time/hrtimer.c cpu_base->next_timer = NULL;
cpu_base 2003 kernel/time/hrtimer.c cpu_base->softirq_next_timer = NULL;
cpu_base 2004 kernel/time/hrtimer.c cpu_base->expires_next = KTIME_MAX;
cpu_base 2005 kernel/time/hrtimer.c cpu_base->softirq_expires_next = KTIME_MAX;
cpu_base 2006 kernel/time/hrtimer.c hrtimer_cpu_base_init_expiry_lock(cpu_base);
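Most of the remaining hits are the hrtimer core, where cpu_base names the per-CPU struct hrtimer_cpu_base. Each embedded clock base carries a back-pointer to its owning cpu_base (set at hrtimer.c line 1994 above), which is how a timer reaches the per-CPU lock via timer->base->cpu_base->lock. A trimmed stand-in for that shape, assuming simplified structs that are not the real kernel definitions:

/* Sketch of the container/back-pointer relationship in hrtimers. */
#include <stddef.h>

struct cpu_base;

struct clock_base {
	struct cpu_base *cpu_base;	/* back-pointer to the owner */
	int index;
};

struct cpu_base {
	int cpu;
	struct clock_base clock_base[4];
};

static void cpu_base_init(struct cpu_base *cb, int cpu)
{
	cb->cpu = cpu;
	for (size_t i = 0; i < 4; i++) {
		cb->clock_base[i].cpu_base = cb;  /* as hrtimer.c:1994 */
		cb->clock_base[i].index = (int)i;
	}
}

The back-pointer lets per-timer code find per-CPU state in O(1) without knowing which CPU it lives on.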
cpu_base 76 kernel/time/tick-broadcast-hrtimer.c bc->bound_on = bctimer.base->cpu_base->cpu;

cpu_base 86 kernel/time/timer_list.c raw_spin_lock_irqsave(&base->cpu_base->lock, flags);
cpu_base 102 kernel/time/timer_list.c raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
cpu_base 108 kernel/time/timer_list.c raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
cpu_base 132 kernel/time/timer_list.c struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
cpu_base 138 kernel/time/timer_list.c print_base(m, cpu_base->clock_base + i, now);
cpu_base 142 kernel/time/timer_list.c (unsigned long long)(cpu_base->x))
cpu_base 145 kernel/time/timer_list.c (unsigned long long)(ktime_to_ns(cpu_base->x)))
cpu_base 289 virt/kvm/arm/vgic/vgic-v2.c static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
cpu_base 293 virt/kvm/arm/vgic/vgic-v2.c if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
cpu_base 296 virt/kvm/arm/vgic/vgic-v2.c if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
cpu_base 298 virt/kvm/arm/vgic/vgic-v2.c if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
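The vgic-v2.c hits close the list with an overflow-safe range check: the cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base comparison catches address wrap-around, and the two <= tests verify that the distributor and CPU-interface windows do not overlap. A stand-alone restatement, with invented sizes in place of the KVM constants:

/* Sketch of the wrap-around plus non-overlap test from vgic-v2.c. */
#include <stdbool.h>
#include <stdint.h>

#define DIST_SIZE 0x1000u	/* example sizes, not the KVM constants */
#define CPU_SIZE  0x2000u

static bool bases_ok(uint64_t dist_base, uint64_t cpu_base)
{
	/* unsigned wrap-around: end of the window computed below start */
	if (cpu_base + CPU_SIZE < cpu_base)
		return false;
	/* disjoint if either window ends before the other begins */
	if (dist_base + DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + CPU_SIZE <= dist_base)
		return true;
	return false;
}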