rdp 21 arch/powerpc/include/asm/cpm.h __be32 rdp; /* Internal */ rdp 42 arch/x86/include/asm/fpu/types.h u64 rdp; /* Data Pointer */ rdp 58 arch/x86/include/asm/user_64.h __u64 rdp; rdp 156 arch/x86/include/uapi/asm/sigcontext.h __u64 rdp; rdp 245 arch/x86/kernel/fpu/regset.c env->foo = fxsave->rdp; rdp 282 arch/x86/kernel/fpu/regset.c fxsave->rdp = env->foo; rdp 173 arch/x86/kernel/fpu/xstate.c fx->rdp = 0; rdp 9057 arch/x86/kvm/x86.c fpu->last_dp = fxsave->rdp; rdp 9078 arch/x86/kvm/x86.c fxsave->rdp = fpu->last_dp; rdp 2950 drivers/gpu/drm/radeon/r100.c uint32_t rdp, wdp; rdp 2954 drivers/gpu/drm/radeon/r100.c rdp = RREG32(RADEON_CP_RB_RPTR); rdp 2956 drivers/gpu/drm/radeon/r100.c count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; rdp 2959 drivers/gpu/drm/radeon/r100.c seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); rdp 2964 drivers/gpu/drm/radeon/r100.c i = (rdp + j) & ring->ptr_mask; rdp 659 drivers/hwtracing/coresight/coresight-etb10.c coresight_etb10_reg(rdp, ETB_RAM_DEPTH_REG); rdp 56 drivers/i2c/busses/i2c-cpm.c uint rdp; /* Internal */ rdp 154 drivers/i2c/busses/i2c-cpm.c out_be32(&i2c_ram->rdp, 0); rdp 136 drivers/misc/mic/bus/scif_bus.c void __iomem *rdp, struct dma_chan **chan, int num_chan, rdp 159 drivers/misc/mic/bus/scif_bus.c sdev->rdp = rdp; rdp 53 drivers/misc/mic/bus/scif_bus.h void __iomem *rdp; rdp 111 drivers/misc/mic/bus/scif_bus.h void *dp, void __iomem *rdp, rdp 89 drivers/misc/mic/scif/scif_main.c struct mic_bootparam __iomem *bp = sdev->rdp; rdp 176 drivers/misc/mic/scif/scif_main.c struct mic_bootparam __iomem *bp = sdev->rdp; rdp 218 drivers/misc/mic/scif/scif_main.c struct mic_bootparam __iomem *bp = sdev->rdp; rdp 139 drivers/net/ethernet/amd/a2065.c ll->rdp = (leptr & 0xFFFF); rdp 141 drivers/net/ethernet/amd/a2065.c ll->rdp = leptr >> 16; rdp 143 drivers/net/ethernet/amd/a2065.c ll->rdp = lp->busmaster_regval; rdp 228 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_INIT; rdp 231 drivers/net/ethernet/amd/a2065.c for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++) rdp 233 drivers/net/ethernet/amd/a2065.c if ((i == 100) || (ll->rdp & LE_C0_ERR)) { rdp 234 drivers/net/ethernet/amd/a2065.c pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp); rdp 239 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_IDON; rdp 240 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_INEA | LE_C0_STRT; rdp 269 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_RINT | LE_C0_INEA; rdp 334 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_TINT | LE_C0_INEA; rdp 362 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_STOP; rdp 379 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_STOP; rdp 403 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_TINT | LE_C0_INEA; rdp 422 drivers/net/ethernet/amd/a2065.c csr0 = ll->rdp; rdp 428 drivers/net/ethernet/amd/a2065.c ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT | rdp 433 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA; rdp 451 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_STRT; rdp 458 drivers/net/ethernet/amd/a2065.c ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR | rdp 471 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_STOP; rdp 497 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_STOP; rdp 511 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_STOP; rdp 530 drivers/net/ethernet/amd/a2065.c netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp); rdp 574 drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_INEA | LE_C0_TDMD; rdp 628 
drivers/net/ethernet/amd/a2065.c ll->rdp = LE_C0_STOP; rdp 41 drivers/net/ethernet/amd/a2065.h unsigned short rdp; /* Register Data Port */ rdp 286 drivers/net/ethernet/amd/declance.c volatile unsigned short rdp; /* register data port */ rdp 314 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, (leptr & 0xFFFF)); rdp 316 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, leptr >> 16); rdp 318 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, lp->busmaster_regval); rdp 533 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_INIT); rdp 536 drivers/net/ethernet/amd/declance.c for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) { rdp 539 drivers/net/ethernet/amd/declance.c if ((i == 100) || (ll->rdp & LE_C0_ERR)) { rdp 541 drivers/net/ethernet/amd/declance.c i, ll->rdp); rdp 544 drivers/net/ethernet/amd/declance.c if ((ll->rdp & LE_C0_ERR)) { rdp 546 drivers/net/ethernet/amd/declance.c i, ll->rdp); rdp 549 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_IDON); rdp 550 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_STRT); rdp 551 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_INEA); rdp 676 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_STOP); rdp 692 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_STOP); rdp 742 drivers/net/ethernet/amd/declance.c csr0 = ll->rdp; rdp 745 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT)); rdp 749 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | rdp 767 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_STOP); rdp 775 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_INEA); rdp 776 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_INEA); rdp 789 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_STOP); rdp 849 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_STOP); rdp 878 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_STOP); rdp 893 drivers/net/ethernet/amd/declance.c dev->name, ll->rdp); rdp 934 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD); rdp 990 drivers/net/ethernet/amd/declance.c writereg(&ll->rdp, LE_C0_STOP); rdp 221 drivers/net/ethernet/cavium/liquidio/octeon_iq.h u64 rdp; rdp 248 drivers/net/ethernet/cavium/liquidio/octeon_iq.h u64 rdp; rdp 38 drivers/net/ethernet/cavium/liquidio/octeon_nic.c struct octeon_instr_rdp *rdp; rdp 54 drivers/net/ethernet/cavium/liquidio/octeon_nic.c rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; rdp 60 drivers/net/ethernet/cavium/liquidio/octeon_nic.c rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp; rdp 68 drivers/net/ethernet/cavium/liquidio/octeon_nic.c rdp->pcie_port = oct->pcie_port; rdp 69 drivers/net/ethernet/cavium/liquidio/octeon_nic.c rdp->rlen = rdatasize; rdp 597 drivers/net/ethernet/cavium/liquidio/request_manager.c struct octeon_instr_rdp *rdp; rdp 638 drivers/net/ethernet/cavium/liquidio/request_manager.c rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; rdp 639 drivers/net/ethernet/cavium/liquidio/request_manager.c rdp->pcie_port = oct->pcie_port; rdp 640 drivers/net/ethernet/cavium/liquidio/request_manager.c rdp->rlen = sc->rdatasize; rdp 675 drivers/net/ethernet/cavium/liquidio/request_manager.c rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp; rdp 676 drivers/net/ethernet/cavium/liquidio/request_manager.c rdp->pcie_port = oct->pcie_port; rdp 677 drivers/net/ethernet/cavium/liquidio/request_manager.c 
rdp->rlen = sc->rdatasize; rdp 672 drivers/net/vxlan.c __u32 ifindex, struct vxlan_rdst **rdp) rdp 697 drivers/net/vxlan.c *rdp = rd; rdp 218 drivers/pnp/isapnp/core.c int rdp = isapnp_rdp; rdp 225 drivers/pnp/isapnp/core.c while (rdp <= 0x3ff) { rdp 230 drivers/pnp/isapnp/core.c if ((rdp < 0x280 || rdp > 0x380) rdp 231 drivers/pnp/isapnp/core.c && request_region(rdp, 1, "ISAPnP")) { rdp 232 drivers/pnp/isapnp/core.c isapnp_rdp = rdp; rdp 233 drivers/pnp/isapnp/core.c old_rdp = rdp; rdp 236 drivers/pnp/isapnp/core.c rdp += RDP_STEP; rdp 360 drivers/spi/spi-fsl-cpm.c out_be32(&mspi->pram->rdp, 0); rdp 151 kernel/rcu/tree.c static void rcu_report_exp_rdp(struct rcu_data *rdp); rdp 212 kernel/rcu/tree.c struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); rdp 214 kernel/rcu/tree.c if (rcu_segcblist_is_enabled(&rdp->cblist)) rdp 215 kernel/rcu/tree.c return rcu_segcblist_n_cbs(&rdp->cblist); rdp 231 kernel/rcu/tree.c struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 239 kernel/rcu/tree.c seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); rdp 254 kernel/rcu/tree.c struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 262 kernel/rcu/tree.c seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); rdp 266 kernel/rcu/tree.c atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks); rdp 285 kernel/rcu/tree.c struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 287 kernel/rcu/tree.c if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR) rdp 289 kernel/rcu/tree.c atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks); rdp 299 kernel/rcu/tree.c struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 301 kernel/rcu/tree.c return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR); rdp 308 kernel/rcu/tree.c int rcu_dynticks_snap(struct rcu_data *rdp) rdp 310 kernel/rcu/tree.c int snap = atomic_add_return(0, &rdp->dynticks); rdp 329 kernel/rcu/tree.c static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap) rdp 331 kernel/rcu/tree.c return snap != rcu_dynticks_snap(rdp); rdp 345 kernel/rcu/tree.c struct rcu_data *rdp = &per_cpu(rcu_data, cpu); rdp 348 kernel/rcu/tree.c old = atomic_read(&rdp->dynticks); rdp 352 kernel/rcu/tree.c } while (atomic_cmpxchg(&rdp->dynticks, old, new) != old); rdp 498 kernel/rcu/tree.c static void force_qs_rnp(int (*f)(struct rcu_data *rdp)); rdp 567 kernel/rcu/tree.c struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 569 kernel/rcu/tree.c WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE); rdp 570 kernel/rcu/tree.c WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); rdp 572 kernel/rcu/tree.c rdp->dynticks_nesting == 0); rdp 573 kernel/rcu/tree.c if (rdp->dynticks_nesting != 1) { rdp 574 kernel/rcu/tree.c rdp->dynticks_nesting--; rdp 579 kernel/rcu/tree.c trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks)); rdp 581 kernel/rcu/tree.c rdp = this_cpu_ptr(&rcu_data); rdp 582 kernel/rcu/tree.c do_nocb_deferred_wakeup(rdp); rdp 585 kernel/rcu/tree.c WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */ rdp 637 kernel/rcu/tree.c struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 644 kernel/rcu/tree.c WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0); rdp 651 kernel/rcu/tree.c if (rdp->dynticks_nmi_nesting != 1) { rdp 652 kernel/rcu/tree.c trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp 653 kernel/rcu/tree.c atomic_read(&rdp->dynticks)); rdp 654 kernel/rcu/tree.c WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. 
*/ rdp 655 kernel/rcu/tree.c rdp->dynticks_nmi_nesting - 2); rdp 660 kernel/rcu/tree.c trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks)); rdp 661 kernel/rcu/tree.c WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */ rdp 733 kernel/rcu/tree.c struct rcu_data *rdp; rdp 737 kernel/rcu/tree.c rdp = this_cpu_ptr(&rcu_data); rdp 738 kernel/rcu/tree.c oldval = rdp->dynticks_nesting; rdp 741 kernel/rcu/tree.c rdp->dynticks_nesting++; rdp 747 kernel/rcu/tree.c trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks)); rdp 749 kernel/rcu/tree.c WRITE_ONCE(rdp->dynticks_nesting, 1); rdp 750 kernel/rcu/tree.c WARN_ON_ONCE(rdp->dynticks_nmi_nesting); rdp 751 kernel/rcu/tree.c WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE); rdp 803 kernel/rcu/tree.c struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 807 kernel/rcu/tree.c WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0); rdp 830 kernel/rcu/tree.c rdp->dynticks_nmi_nesting, rdp 831 kernel/rcu/tree.c rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks)); rdp 832 kernel/rcu/tree.c WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */ rdp 833 kernel/rcu/tree.c rdp->dynticks_nmi_nesting + incby); rdp 943 kernel/rcu/tree.c struct rcu_data *rdp; rdp 950 kernel/rcu/tree.c rdp = this_cpu_ptr(&rcu_data); rdp 951 kernel/rcu/tree.c rnp = rdp->mynode; rdp 952 kernel/rcu/tree.c if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) rdp 968 kernel/rcu/tree.c static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp) rdp 971 kernel/rcu/tree.c if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4, rdp 973 kernel/rcu/tree.c WRITE_ONCE(rdp->gpwrap, true); rdp 974 kernel/rcu/tree.c if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq)) rdp 975 kernel/rcu/tree.c rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4; rdp 983 kernel/rcu/tree.c static int dyntick_save_progress_counter(struct rcu_data *rdp) rdp 985 kernel/rcu/tree.c rdp->dynticks_snap = rcu_dynticks_snap(rdp); rdp 986 kernel/rcu/tree.c if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) { rdp 987 kernel/rcu/tree.c trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); rdp 988 kernel/rcu/tree.c rcu_gpnum_ovf(rdp->mynode, rdp); rdp 1000 kernel/rcu/tree.c static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) rdp 1005 kernel/rcu/tree.c struct rcu_node *rnp = rdp->mynode; rdp 1015 kernel/rcu/tree.c if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) { rdp 1016 kernel/rcu/tree.c trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti")); rdp 1017 kernel/rcu/tree.c rcu_gpnum_ovf(rnp, rdp); rdp 1022 kernel/rcu/tree.c if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) && rdp 1034 kernel/rcu/tree.c onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); rdp 1036 kernel/rcu/tree.c __func__, rdp->cpu, ".o"[onl], rdp 1037 kernel/rcu/tree.c (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, rdp 1038 kernel/rcu/tree.c (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); rdp 1054 kernel/rcu/tree.c ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu); rdp 1055 kernel/rcu/tree.c rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu); rdp 1074 kernel/rcu/tree.c if (tick_nohz_full_cpu(rdp->cpu) && rdp 1076 kernel/rcu/tree.c READ_ONCE(rdp->last_fqs_resched) + jtsq * 3)) { rdp 1077 kernel/rcu/tree.c resched_cpu(rdp->cpu); rdp 1078 kernel/rcu/tree.c WRITE_ONCE(rdp->last_fqs_resched, jiffies); rdp 1090 kernel/rcu/tree.c READ_ONCE(rdp->last_fqs_resched) + jtsq)) { rdp 1091 kernel/rcu/tree.c 
resched_cpu(rdp->cpu); rdp 1092 kernel/rcu/tree.c WRITE_ONCE(rdp->last_fqs_resched, jiffies); rdp 1095 kernel/rcu/tree.c !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && rdp 1096 kernel/rcu/tree.c (rnp->ffmask & rdp->grpmask)) { rdp 1097 kernel/rcu/tree.c init_irq_work(&rdp->rcu_iw, rcu_iw_handler); rdp 1098 kernel/rcu/tree.c rdp->rcu_iw_pending = true; rdp 1099 kernel/rcu/tree.c rdp->rcu_iw_gp_seq = rnp->gp_seq; rdp 1100 kernel/rcu/tree.c irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); rdp 1108 kernel/rcu/tree.c static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp, rdp 1131 kernel/rcu/tree.c static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp, rdp 1147 kernel/rcu/tree.c trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf")); rdp 1155 kernel/rcu/tree.c trace_rcu_this_gp(rnp, rdp, gp_seq_req, rdp 1167 kernel/rcu/tree.c trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, rdp 1179 kernel/rcu/tree.c trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot")); rdp 1182 kernel/rcu/tree.c trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot")); rdp 1186 kernel/rcu/tree.c trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread")); rdp 1195 kernel/rcu/tree.c rdp->gp_seq_needed = rnp->gp_seq_needed; rdp 1209 kernel/rcu/tree.c struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 1214 kernel/rcu/tree.c trace_rcu_this_gp(rnp, rdp, rnp->gp_seq, rdp 1257 kernel/rcu/tree.c static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp) rdp 1262 kernel/rcu/tree.c rcu_lockdep_assert_cblist_protected(rdp); rdp 1266 kernel/rcu/tree.c if (!rcu_segcblist_pend_cbs(&rdp->cblist)) rdp 1280 kernel/rcu/tree.c if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req)) rdp 1281 kernel/rcu/tree.c ret = rcu_start_this_gp(rnp, rdp, gp_seq_req); rdp 1284 kernel/rcu/tree.c if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL)) rdp 1285 kernel/rcu/tree.c trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccWaitCB")); rdp 1287 kernel/rcu/tree.c trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("AccReadyCB")); rdp 1299 kernel/rcu/tree.c struct rcu_data *rdp) rdp 1304 kernel/rcu/tree.c rcu_lockdep_assert_cblist_protected(rdp); rdp 1306 kernel/rcu/tree.c if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) { rdp 1308 kernel/rcu/tree.c (void)rcu_segcblist_accelerate(&rdp->cblist, c); rdp 1312 kernel/rcu/tree.c needwake = rcu_accelerate_cbs(rnp, rdp); rdp 1328 kernel/rcu/tree.c static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp) rdp 1330 kernel/rcu/tree.c rcu_lockdep_assert_cblist_protected(rdp); rdp 1334 kernel/rcu/tree.c if (!rcu_segcblist_pend_cbs(&rdp->cblist)) rdp 1341 kernel/rcu/tree.c rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq); rdp 1344 kernel/rcu/tree.c return rcu_accelerate_cbs(rnp, rdp); rdp 1352 kernel/rcu/tree.c struct rcu_data *rdp) rdp 1354 kernel/rcu/tree.c rcu_lockdep_assert_cblist_protected(rdp); rdp 1358 kernel/rcu/tree.c WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp)); rdp 1368 kernel/rcu/tree.c static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp) rdp 1373 kernel/rcu/tree.c rcu_segcblist_is_offloaded(&rdp->cblist); rdp 1377 kernel/rcu/tree.c if (rdp->gp_seq == rnp->gp_seq) rdp 1381 kernel/rcu/tree.c if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) || rdp 1382 kernel/rcu/tree.c unlikely(READ_ONCE(rdp->gpwrap))) { rdp 1384 kernel/rcu/tree.c ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. 
*/ rdp 1385 kernel/rcu/tree.c trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend")); rdp 1388 kernel/rcu/tree.c ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */ rdp 1392 kernel/rcu/tree.c if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) || rdp 1393 kernel/rcu/tree.c unlikely(READ_ONCE(rdp->gpwrap))) { rdp 1400 kernel/rcu/tree.c need_gp = !!(rnp->qsmask & rdp->grpmask); rdp 1401 kernel/rcu/tree.c rdp->cpu_no_qs.b.norm = need_gp; rdp 1402 kernel/rcu/tree.c rdp->core_needs_qs = need_gp; rdp 1403 kernel/rcu/tree.c zero_cpu_stall_ticks(rdp); rdp 1405 kernel/rcu/tree.c rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */ rdp 1406 kernel/rcu/tree.c if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap) rdp 1407 kernel/rcu/tree.c rdp->gp_seq_needed = rnp->gp_seq_needed; rdp 1408 kernel/rcu/tree.c WRITE_ONCE(rdp->gpwrap, false); rdp 1409 kernel/rcu/tree.c rcu_gpnum_ovf(rnp, rdp); rdp 1413 kernel/rcu/tree.c static void note_gp_changes(struct rcu_data *rdp) rdp 1420 kernel/rcu/tree.c rnp = rdp->mynode; rdp 1421 kernel/rcu/tree.c if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) && rdp 1422 kernel/rcu/tree.c !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */ rdp 1427 kernel/rcu/tree.c needwake = __note_gp_changes(rnp, rdp); rdp 1449 kernel/rcu/tree.c struct rcu_data *rdp; rdp 1547 kernel/rcu/tree.c rdp = this_cpu_ptr(&rcu_data); rdp 1551 kernel/rcu/tree.c if (rnp == rdp->mynode) rdp 1552 kernel/rcu/tree.c (void)__note_gp_changes(rnp, rdp); rdp 1690 kernel/rcu/tree.c struct rcu_data *rdp; rdp 1728 kernel/rcu/tree.c rdp = this_cpu_ptr(&rcu_data); rdp 1729 kernel/rcu/tree.c if (rnp == rdp->mynode) rdp 1730 kernel/rcu/tree.c needgp = __note_gp_changes(rnp, rdp) || needgp; rdp 1748 kernel/rcu/tree.c rdp = this_cpu_ptr(&rcu_data); rdp 1750 kernel/rcu/tree.c trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed, rdp 1756 kernel/rcu/tree.c rcu_segcblist_is_offloaded(&rdp->cblist); rdp 1757 kernel/rcu/tree.c if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) { rdp 1947 kernel/rcu/tree.c rcu_report_qs_rdp(int cpu, struct rcu_data *rdp) rdp 1953 kernel/rcu/tree.c rcu_segcblist_is_offloaded(&rdp->cblist); rdp 1956 kernel/rcu/tree.c rnp = rdp->mynode; rdp 1958 kernel/rcu/tree.c if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq || rdp 1959 kernel/rcu/tree.c rdp->gpwrap) { rdp 1967 kernel/rcu/tree.c rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */ rdp 1971 kernel/rcu/tree.c mask = rdp->grpmask; rdp 1972 kernel/rcu/tree.c rdp->core_needs_qs = false; rdp 1981 kernel/rcu/tree.c needwake = rcu_accelerate_cbs(rnp, rdp); rdp 1997 kernel/rcu/tree.c rcu_check_quiescent_state(struct rcu_data *rdp) rdp 2000 kernel/rcu/tree.c note_gp_changes(rdp); rdp 2006 kernel/rcu/tree.c if (!rdp->core_needs_qs) rdp 2013 kernel/rcu/tree.c if (rdp->cpu_no_qs.b.norm) rdp 2020 kernel/rcu/tree.c rcu_report_qs_rdp(rdp->cpu, rdp); rdp 2030 kernel/rcu/tree.c struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 2031 kernel/rcu/tree.c struct rcu_node *rnp = rdp->mynode; rdp 2036 kernel/rcu/tree.c blkd = !!(rnp->qsmask & rdp->grpmask); rdp 2095 kernel/rcu/tree.c struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); rdp 2096 kernel/rcu/tree.c struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. 
*/ rdp 2112 kernel/rcu/tree.c static void rcu_do_batch(struct rcu_data *rdp) rdp 2116 kernel/rcu/tree.c rcu_segcblist_is_offloaded(&rdp->cblist); rdp 2123 kernel/rcu/tree.c if (!rcu_segcblist_ready_cbs(&rdp->cblist)) { rdp 2125 kernel/rcu/tree.c rcu_segcblist_n_lazy_cbs(&rdp->cblist), rdp 2126 kernel/rcu/tree.c rcu_segcblist_n_cbs(&rdp->cblist), 0); rdp 2128 kernel/rcu/tree.c !rcu_segcblist_empty(&rdp->cblist), rdp 2140 kernel/rcu/tree.c rcu_nocb_lock(rdp); rdp 2142 kernel/rcu/tree.c pending = rcu_segcblist_n_cbs(&rdp->cblist); rdp 2143 kernel/rcu/tree.c bl = max(rdp->blimit, pending >> rcu_divisor); rdp 2147 kernel/rcu/tree.c rcu_segcblist_n_lazy_cbs(&rdp->cblist), rdp 2148 kernel/rcu/tree.c rcu_segcblist_n_cbs(&rdp->cblist), bl); rdp 2149 kernel/rcu/tree.c rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl); rdp 2151 kernel/rcu/tree.c rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); rdp 2152 kernel/rcu/tree.c rcu_nocb_unlock_irqrestore(rdp, flags); rdp 2186 kernel/rcu/tree.c rcu_nocb_lock(rdp); rdp 2192 kernel/rcu/tree.c rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); rdp 2194 kernel/rcu/tree.c rcu_segcblist_insert_count(&rdp->cblist, &rcl); rdp 2197 kernel/rcu/tree.c count = rcu_segcblist_n_cbs(&rdp->cblist); rdp 2198 kernel/rcu/tree.c if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark) rdp 2199 kernel/rcu/tree.c rdp->blimit = blimit; rdp 2202 kernel/rcu/tree.c if (count == 0 && rdp->qlen_last_fqs_check != 0) { rdp 2203 kernel/rcu/tree.c rdp->qlen_last_fqs_check = 0; rdp 2204 kernel/rcu/tree.c rdp->n_force_qs_snap = rcu_state.n_force_qs; rdp 2205 kernel/rcu/tree.c } else if (count < rdp->qlen_last_fqs_check - qhimark) rdp 2206 kernel/rcu/tree.c rdp->qlen_last_fqs_check = count; rdp 2212 kernel/rcu/tree.c WARN_ON_ONCE(count == 0 && !rcu_segcblist_empty(&rdp->cblist)); rdp 2214 kernel/rcu/tree.c count != 0 && rcu_segcblist_empty(&rdp->cblist)); rdp 2216 kernel/rcu/tree.c rcu_nocb_unlock_irqrestore(rdp, flags); rdp 2219 kernel/rcu/tree.c if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist)) rdp 2258 kernel/rcu/tree.c static void force_qs_rnp(int (*f)(struct rcu_data *rdp)) rdp 2343 kernel/rcu/tree.c struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); rdp 2344 kernel/rcu/tree.c struct rcu_node *rnp = rdp->mynode; rdp 2346 kernel/rcu/tree.c rcu_segcblist_is_offloaded(&rdp->cblist); rdp 2351 kernel/rcu/tree.c WARN_ON_ONCE(!rdp->beenonline); rdp 2362 kernel/rcu/tree.c rcu_check_quiescent_state(rdp); rdp 2366 kernel/rcu/tree.c rcu_segcblist_is_enabled(&rdp->cblist) && !offloaded) { rdp 2368 kernel/rcu/tree.c if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) rdp 2369 kernel/rcu/tree.c rcu_accelerate_cbs_unlocked(rnp, rdp); rdp 2373 kernel/rcu/tree.c rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check()); rdp 2376 kernel/rcu/tree.c if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist) && rdp 2378 kernel/rcu/tree.c rcu_do_batch(rdp); rdp 2381 kernel/rcu/tree.c do_nocb_deferred_wakeup(rdp); rdp 2500 kernel/rcu/tree.c static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head, rdp 2521 kernel/rcu/tree.c if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) > rdp 2522 kernel/rcu/tree.c rdp->qlen_last_fqs_check + qhimark)) { rdp 2525 kernel/rcu/tree.c note_gp_changes(rdp); rdp 2529 kernel/rcu/tree.c rcu_accelerate_cbs_unlocked(rdp->mynode, rdp); rdp 2532 kernel/rcu/tree.c rdp->blimit = DEFAULT_MAX_RCU_BLIMIT; rdp 2533 kernel/rcu/tree.c if (rcu_state.n_force_qs == rdp->n_force_qs_snap && rdp 2534 kernel/rcu/tree.c 
rcu_segcblist_first_pend_cb(&rdp->cblist) != head) rdp 2536 kernel/rcu/tree.c rdp->n_force_qs_snap = rcu_state.n_force_qs; rdp 2537 kernel/rcu/tree.c rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist); rdp 2559 kernel/rcu/tree.c struct rcu_data *rdp; rdp 2579 kernel/rcu/tree.c rdp = this_cpu_ptr(&rcu_data); rdp 2582 kernel/rcu/tree.c if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) { rdp 2588 kernel/rcu/tree.c if (rcu_segcblist_empty(&rdp->cblist)) rdp 2589 kernel/rcu/tree.c rcu_segcblist_init(&rdp->cblist); rdp 2591 kernel/rcu/tree.c if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags)) rdp 2594 kernel/rcu/tree.c rcu_segcblist_enqueue(&rdp->cblist, head, lazy); rdp 2598 kernel/rcu/tree.c rcu_segcblist_n_lazy_cbs(&rdp->cblist), rdp 2599 kernel/rcu/tree.c rcu_segcblist_n_cbs(&rdp->cblist)); rdp 2602 kernel/rcu/tree.c rcu_segcblist_n_lazy_cbs(&rdp->cblist), rdp 2603 kernel/rcu/tree.c rcu_segcblist_n_cbs(&rdp->cblist)); rdp 2607 kernel/rcu/tree.c unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) { rdp 2608 kernel/rcu/tree.c __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */ rdp 2610 kernel/rcu/tree.c __call_rcu_core(rdp, head, flags); rdp 2792 kernel/rcu/tree.c struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 2793 kernel/rcu/tree.c struct rcu_node *rnp = rdp->mynode; rdp 2796 kernel/rcu/tree.c check_cpu_stall(rdp); rdp 2799 kernel/rcu/tree.c if (rcu_nocb_need_deferred_wakeup(rdp)) rdp 2807 kernel/rcu/tree.c if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) rdp 2811 kernel/rcu/tree.c if (rcu_segcblist_ready_cbs(&rdp->cblist)) rdp 2816 kernel/rcu/tree.c rcu_segcblist_is_enabled(&rdp->cblist) && rdp 2818 kernel/rcu/tree.c !rcu_segcblist_is_offloaded(&rdp->cblist)) && rdp 2819 kernel/rcu/tree.c !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) rdp 2823 kernel/rcu/tree.c if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq || rdp 2824 kernel/rcu/tree.c unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */ rdp 2861 kernel/rcu/tree.c struct rcu_data *rdp = raw_cpu_ptr(&rcu_data); rdp 2864 kernel/rcu/tree.c rdp->barrier_head.func = rcu_barrier_callback; rdp 2865 kernel/rcu/tree.c debug_rcu_head_queue(&rdp->barrier_head); rdp 2866 kernel/rcu/tree.c rcu_nocb_lock(rdp); rdp 2867 kernel/rcu/tree.c WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies)); rdp 2868 kernel/rcu/tree.c if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) { rdp 2871 kernel/rcu/tree.c debug_rcu_head_unqueue(&rdp->barrier_head); rdp 2875 kernel/rcu/tree.c rcu_nocb_unlock(rdp); rdp 2889 kernel/rcu/tree.c struct rcu_data *rdp; rdp 2926 kernel/rcu/tree.c rdp = per_cpu_ptr(&rcu_data, cpu); rdp 2928 kernel/rcu/tree.c !rcu_segcblist_is_offloaded(&rdp->cblist)) rdp 2930 kernel/rcu/tree.c if (rcu_segcblist_n_cbs(&rdp->cblist)) { rdp 2994 kernel/rcu/tree.c struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); rdp 2997 kernel/rcu/tree.c rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu); rdp 2998 kernel/rcu/tree.c WARN_ON_ONCE(rdp->dynticks_nesting != 1); rdp 2999 kernel/rcu/tree.c WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp))); rdp 3000 kernel/rcu/tree.c rdp->rcu_ofl_gp_seq = rcu_state.gp_seq; rdp 3001 kernel/rcu/tree.c rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED; rdp 3002 kernel/rcu/tree.c rdp->rcu_onl_gp_seq = rcu_state.gp_seq; rdp 3003 kernel/rcu/tree.c rdp->rcu_onl_gp_flags = RCU_GP_CLEANED; rdp 3004 kernel/rcu/tree.c rdp->cpu = cpu; rdp 3005 kernel/rcu/tree.c rcu_boot_init_nocb_percpu_data(rdp); rdp 3021 kernel/rcu/tree.c struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); rdp 
3026 kernel/rcu/tree.c rdp->qlen_last_fqs_check = 0; rdp 3027 kernel/rcu/tree.c rdp->n_force_qs_snap = rcu_state.n_force_qs; rdp 3028 kernel/rcu/tree.c rdp->blimit = blimit; rdp 3029 kernel/rcu/tree.c if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */ rdp 3030 kernel/rcu/tree.c !rcu_segcblist_is_offloaded(&rdp->cblist)) rdp 3031 kernel/rcu/tree.c rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */ rdp 3032 kernel/rcu/tree.c rdp->dynticks_nesting = 1; /* CPU not up, no tearing. */ rdp 3041 kernel/rcu/tree.c rnp = rdp->mynode; rdp 3043 kernel/rcu/tree.c rdp->beenonline = true; /* We have now been online. */ rdp 3044 kernel/rcu/tree.c rdp->gp_seq = rnp->gp_seq; rdp 3045 kernel/rcu/tree.c rdp->gp_seq_needed = rnp->gp_seq; rdp 3046 kernel/rcu/tree.c rdp->cpu_no_qs.b.norm = true; rdp 3047 kernel/rcu/tree.c rdp->core_needs_qs = false; rdp 3048 kernel/rcu/tree.c rdp->rcu_iw_pending = false; rdp 3049 kernel/rcu/tree.c rdp->rcu_iw_gp_seq = rnp->gp_seq - 1; rdp 3050 kernel/rcu/tree.c trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl")); rdp 3063 kernel/rcu/tree.c struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); rdp 3065 kernel/rcu/tree.c rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); rdp 3075 kernel/rcu/tree.c struct rcu_data *rdp; rdp 3078 kernel/rcu/tree.c rdp = per_cpu_ptr(&rcu_data, cpu); rdp 3079 kernel/rcu/tree.c rnp = rdp->mynode; rdp 3081 kernel/rcu/tree.c rnp->ffmask |= rdp->grpmask; rdp 3097 kernel/rcu/tree.c struct rcu_data *rdp; rdp 3100 kernel/rcu/tree.c rdp = per_cpu_ptr(&rcu_data, cpu); rdp 3101 kernel/rcu/tree.c rnp = rdp->mynode; rdp 3103 kernel/rcu/tree.c rnp->ffmask &= ~rdp->grpmask; rdp 3129 kernel/rcu/tree.c struct rcu_data *rdp; rdp 3137 kernel/rcu/tree.c rdp = per_cpu_ptr(&rcu_data, cpu); rdp 3138 kernel/rcu/tree.c rnp = rdp->mynode; rdp 3139 kernel/rcu/tree.c mask = rdp->grpmask; rdp 3148 kernel/rcu/tree.c rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */ rdp 3149 kernel/rcu/tree.c rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq); rdp 3150 kernel/rcu/tree.c rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags); rdp 3173 kernel/rcu/tree.c struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); rdp 3174 kernel/rcu/tree.c struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. 
*/ rdp 3183 kernel/rcu/tree.c mask = rdp->grpmask; rdp 3186 kernel/rcu/tree.c rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq); rdp 3187 kernel/rcu/tree.c rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags); rdp 3210 kernel/rcu/tree.c struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); rdp 3213 kernel/rcu/tree.c if (rcu_segcblist_is_offloaded(&rdp->cblist) || rdp 3214 kernel/rcu/tree.c rcu_segcblist_empty(&rdp->cblist)) rdp 3224 kernel/rcu/tree.c needwake = rcu_advance_cbs(my_rnp, rdp) || rdp 3226 kernel/rcu/tree.c rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist); rdp 3228 kernel/rcu/tree.c rcu_segcblist_disable(&rdp->cblist); rdp 3241 kernel/rcu/tree.c WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 || rdp 3242 kernel/rcu/tree.c !rcu_segcblist_empty(&rdp->cblist), rdp 3244 kernel/rcu/tree.c cpu, rcu_segcblist_n_cbs(&rdp->cblist), rdp 3245 kernel/rcu/tree.c rcu_segcblist_first_cb(&rdp->cblist)); rdp 405 kernel/rcu/tree.h int rcu_dynticks_snap(struct rcu_data *rdp); rdp 430 kernel/rcu/tree.h static void zero_cpu_stall_ticks(struct rcu_data *rdp); rdp 434 kernel/rcu/tree.h static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, rdp 436 kernel/rcu/tree.h static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, rdp 438 kernel/rcu/tree.h static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty, rdp 440 kernel/rcu/tree.h static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp); rdp 441 kernel/rcu/tree.h static void do_nocb_deferred_wakeup(struct rcu_data *rdp); rdp 442 kernel/rcu/tree.h static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); rdp 445 kernel/rcu/tree.h static void show_rcu_nocb_state(struct rcu_data *rdp); rdp 446 kernel/rcu/tree.h static void rcu_nocb_lock(struct rcu_data *rdp); rdp 447 kernel/rcu/tree.h static void rcu_nocb_unlock(struct rcu_data *rdp); rdp 448 kernel/rcu/tree.h static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp, rdp 450 kernel/rcu/tree.h static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp); rdp 453 kernel/rcu/tree.h #define rcu_nocb_lock_irqsave(rdp, flags) \ rdp 455 kernel/rcu/tree.h if (!rcu_segcblist_is_offloaded(&(rdp)->cblist)) \ rdp 458 kernel/rcu/tree.h raw_spin_lock_irqsave(&(rdp)->nocb_lock, (flags)); \ rdp 461 kernel/rcu/tree.h #define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags) rdp 472 kernel/rcu/tree.h static void check_cpu_stall(struct rcu_data *rdp); rdp 473 kernel/rcu/tree.h static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp, rdp 251 kernel/rcu/tree_exp.h static void rcu_report_exp_rdp(struct rcu_data *rdp) rdp 253 kernel/rcu/tree_exp.h WRITE_ONCE(rdp->exp_deferred_qs, false); rdp 254 kernel/rcu/tree_exp.h rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true); rdp 277 kernel/rcu/tree_exp.h struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id()); rdp 278 kernel/rcu/tree_exp.h struct rcu_node *rnp = rdp->mynode; rdp 349 kernel/rcu/tree_exp.h struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); rdp 356 kernel/rcu/tree_exp.h snap = rcu_dynticks_snap(rdp); rdp 360 kernel/rcu/tree_exp.h rdp->exp_dynticks_snap = snap; rdp 377 kernel/rcu/tree_exp.h struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); rdp 380 kernel/rcu/tree_exp.h if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) { rdp 489 kernel/rcu/tree_exp.h struct rcu_data *rdp; rdp 495 kernel/rcu/tree_exp.h rdp = per_cpu_ptr(&rcu_data, cpu); rdp 498 kernel/rcu/tree_exp.h "o."[!!(rdp->grpmask & rnp->expmaskinit)], rdp 499 
kernel/rcu/tree_exp.h "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]); rdp 602 kernel/rcu/tree_exp.h struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 603 kernel/rcu/tree_exp.h struct rcu_node *rnp = rdp->mynode; rdp 614 kernel/rcu/tree_exp.h rcu_report_exp_rdp(rdp); rdp 616 kernel/rcu/tree_exp.h rdp->exp_deferred_qs = true; rdp 637 kernel/rcu/tree_exp.h if (rnp->expmask & rdp->grpmask) { rdp 638 kernel/rcu/tree_exp.h rdp->exp_deferred_qs = true; rdp 661 kernel/rcu/tree_exp.h rdp->exp_deferred_qs = true; rdp 712 kernel/rcu/tree_exp.h struct rcu_data *rdp; rdp 715 kernel/rcu/tree_exp.h rdp = this_cpu_ptr(&rcu_data); rdp 716 kernel/rcu/tree_exp.h rnp = rdp->mynode; rdp 717 kernel/rcu/tree_exp.h if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || rdp 732 kernel/rcu/tree_exp.h struct rcu_data *rdp; rdp 736 kernel/rcu/tree_exp.h rdp = per_cpu_ptr(&rcu_data, cpu); rdp 737 kernel/rcu/tree_exp.h rnp = rdp->mynode; rdp 740 kernel/rcu/tree_exp.h if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) || rdp 130 kernel/rcu/tree_plugin.h static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp) rdp 135 kernel/rcu/tree_plugin.h (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) + rdp 136 kernel/rcu/tree_plugin.h (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0); rdp 140 kernel/rcu/tree_plugin.h WARN_ON_ONCE(rdp->mynode != rnp); rdp 144 kernel/rcu/tree_plugin.h rdp->grpmask); rdp 229 kernel/rcu/tree_plugin.h !(rnp->qsmask & rdp->grpmask)); rdp 231 kernel/rcu/tree_plugin.h !(rnp->expmask & rdp->grpmask)); rdp 240 kernel/rcu/tree_plugin.h if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs) rdp 241 kernel/rcu/tree_plugin.h rcu_report_exp_rdp(rdp); rdp 243 kernel/rcu/tree_plugin.h WARN_ON_ONCE(rdp->exp_deferred_qs); rdp 288 kernel/rcu/tree_plugin.h struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 298 kernel/rcu/tree_plugin.h rnp = rdp->mynode; rdp 308 kernel/rcu/tree_plugin.h WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0); rdp 312 kernel/rcu/tree_plugin.h (rnp->qsmask & rdp->grpmask) rdp 315 kernel/rcu/tree_plugin.h rcu_preempt_ctxt_queue(rnp, rdp); rdp 330 kernel/rcu/tree_plugin.h if (rdp->exp_deferred_qs) rdp 331 kernel/rcu/tree_plugin.h rcu_report_exp_rdp(rdp); rdp 432 kernel/rcu/tree_plugin.h struct rcu_data *rdp; rdp 442 kernel/rcu/tree_plugin.h rdp = this_cpu_ptr(&rcu_data); rdp 443 kernel/rcu/tree_plugin.h if (!special.s && !rdp->exp_deferred_qs) { rdp 451 kernel/rcu/tree_plugin.h if (!t->rcu_read_unlock_special.s && !rdp->exp_deferred_qs) { rdp 463 kernel/rcu/tree_plugin.h if (rdp->exp_deferred_qs) { rdp 464 kernel/rcu/tree_plugin.h rcu_report_exp_rdp(rdp); rdp 584 kernel/rcu/tree_plugin.h struct rcu_data *rdp; rdp 586 kernel/rcu/tree_plugin.h rdp = container_of(iwp, struct rcu_data, defer_qs_iw); rdp 587 kernel/rcu/tree_plugin.h rdp->defer_qs_iw_pending = false; rdp 610 kernel/rcu/tree_plugin.h struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 611 kernel/rcu/tree_plugin.h struct rcu_node *rnp = rdp->mynode; rdp 615 kernel/rcu/tree_plugin.h (rdp->grpmask & READ_ONCE(rnp->expmask)) || rdp 616 kernel/rcu/tree_plugin.h tick_nohz_full_cpu(rdp->cpu); rdp 630 kernel/rcu/tree_plugin.h !rdp->defer_qs_iw_pending && exp) { rdp 633 kernel/rcu/tree_plugin.h init_irq_work(&rdp->defer_qs_iw, rdp 635 kernel/rcu/tree_plugin.h rdp->defer_qs_iw_pending = true; rdp 636 kernel/rcu/tree_plugin.h irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu); rdp 749 kernel/rcu/tree_plugin.h struct rcu_data *rdp; rdp 771 kernel/rcu/tree_plugin.h rdp = per_cpu_ptr(&rcu_data, cpu); rdp 772 
kernel/rcu/tree_plugin.h onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp)); rdp 775 kernel/rcu/tree_plugin.h (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, rdp 776 kernel/rcu/tree_plugin.h (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); rdp 1188 kernel/rcu/tree_plugin.h struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu); rdp 1189 kernel/rcu/tree_plugin.h struct rcu_node *rnp = rdp->mynode; rdp 1302 kernel/rcu/tree_plugin.h struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 1306 kernel/rcu/tree_plugin.h if (jiffies == rdp->last_advance_all) rdp 1308 kernel/rcu/tree_plugin.h rdp->last_advance_all = jiffies; rdp 1310 kernel/rcu/tree_plugin.h rnp = rdp->mynode; rdp 1317 kernel/rcu/tree_plugin.h if ((rcu_seq_completed_gp(rdp->gp_seq, rdp 1319 kernel/rcu/tree_plugin.h unlikely(READ_ONCE(rdp->gpwrap))) && rdp 1320 kernel/rcu/tree_plugin.h rcu_segcblist_pend_cbs(&rdp->cblist)) rdp 1321 kernel/rcu/tree_plugin.h note_gp_changes(rdp); rdp 1323 kernel/rcu/tree_plugin.h if (rcu_segcblist_ready_cbs(&rdp->cblist)) rdp 1338 kernel/rcu/tree_plugin.h struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 1344 kernel/rcu/tree_plugin.h if (rcu_segcblist_empty(&rdp->cblist) || rdp 1356 kernel/rcu/tree_plugin.h rdp->last_accelerate = jiffies; rdp 1359 kernel/rcu/tree_plugin.h rdp->all_lazy = !rcu_segcblist_n_nonlazy_cbs(&rdp->cblist); rdp 1360 kernel/rcu/tree_plugin.h if (rdp->all_lazy) { rdp 1383 kernel/rcu/tree_plugin.h struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 1388 kernel/rcu/tree_plugin.h if (rcu_segcblist_is_offloaded(&rdp->cblist)) rdp 1393 kernel/rcu/tree_plugin.h if (tne != rdp->tick_nohz_enabled_snap) { rdp 1394 kernel/rcu/tree_plugin.h if (!rcu_segcblist_empty(&rdp->cblist)) rdp 1396 kernel/rcu/tree_plugin.h rdp->tick_nohz_enabled_snap = tne; rdp 1407 kernel/rcu/tree_plugin.h if (rdp->all_lazy && rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)) { rdp 1408 kernel/rcu/tree_plugin.h rdp->all_lazy = false; rdp 1417 kernel/rcu/tree_plugin.h if (rdp->last_accelerate == jiffies) rdp 1419 kernel/rcu/tree_plugin.h rdp->last_accelerate = jiffies; rdp 1420 kernel/rcu/tree_plugin.h if (rcu_segcblist_pend_cbs(&rdp->cblist)) { rdp 1421 kernel/rcu/tree_plugin.h rnp = rdp->mynode; rdp 1423 kernel/rcu/tree_plugin.h needwake = rcu_accelerate_cbs(rnp, rdp); rdp 1437 kernel/rcu/tree_plugin.h struct rcu_data *rdp = this_cpu_ptr(&rcu_data); rdp 1440 kernel/rcu/tree_plugin.h if (rcu_segcblist_is_offloaded(&rdp->cblist)) rdp 1513 kernel/rcu/tree_plugin.h static void rcu_nocb_bypass_lock(struct rcu_data *rdp) rdp 1516 kernel/rcu/tree_plugin.h if (raw_spin_trylock(&rdp->nocb_bypass_lock)) rdp 1518 kernel/rcu/tree_plugin.h atomic_inc(&rdp->nocb_lock_contended); rdp 1519 kernel/rcu/tree_plugin.h WARN_ON_ONCE(smp_processor_id() != rdp->cpu); rdp 1521 kernel/rcu/tree_plugin.h raw_spin_lock(&rdp->nocb_bypass_lock); rdp 1523 kernel/rcu/tree_plugin.h atomic_dec(&rdp->nocb_lock_contended); rdp 1536 kernel/rcu/tree_plugin.h static void rcu_nocb_wait_contended(struct rcu_data *rdp) rdp 1538 kernel/rcu/tree_plugin.h WARN_ON_ONCE(smp_processor_id() != rdp->cpu); rdp 1539 kernel/rcu/tree_plugin.h while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended))) rdp 1547 kernel/rcu/tree_plugin.h static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp) rdp 1550 kernel/rcu/tree_plugin.h return raw_spin_trylock(&rdp->nocb_bypass_lock); rdp 1556 kernel/rcu/tree_plugin.h static void rcu_nocb_bypass_unlock(struct rcu_data *rdp) rdp 1559 kernel/rcu/tree_plugin.h raw_spin_unlock(&rdp->nocb_bypass_lock); rdp 1566 kernel/rcu/tree_plugin.h static 
void rcu_nocb_lock(struct rcu_data *rdp) rdp 1569 kernel/rcu/tree_plugin.h if (!rcu_segcblist_is_offloaded(&rdp->cblist)) rdp 1571 kernel/rcu/tree_plugin.h raw_spin_lock(&rdp->nocb_lock); rdp 1578 kernel/rcu/tree_plugin.h static void rcu_nocb_unlock(struct rcu_data *rdp) rdp 1580 kernel/rcu/tree_plugin.h if (rcu_segcblist_is_offloaded(&rdp->cblist)) { rdp 1582 kernel/rcu/tree_plugin.h raw_spin_unlock(&rdp->nocb_lock); rdp 1590 kernel/rcu/tree_plugin.h static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp, rdp 1593 kernel/rcu/tree_plugin.h if (rcu_segcblist_is_offloaded(&rdp->cblist)) { rdp 1595 kernel/rcu/tree_plugin.h raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags); rdp 1602 kernel/rcu/tree_plugin.h static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp) rdp 1605 kernel/rcu/tree_plugin.h if (rcu_segcblist_is_offloaded(&rdp->cblist) && rdp 1606 kernel/rcu/tree_plugin.h cpu_online(rdp->cpu)) rdp 1607 kernel/rcu/tree_plugin.h lockdep_assert_held(&rdp->nocb_lock); rdp 1642 kernel/rcu/tree_plugin.h static void wake_nocb_gp(struct rcu_data *rdp, bool force, rdp 1644 kernel/rcu/tree_plugin.h __releases(rdp->nocb_lock) rdp 1647 kernel/rcu/tree_plugin.h struct rcu_data *rdp_gp = rdp->nocb_gp_rdp; rdp 1649 kernel/rcu/tree_plugin.h lockdep_assert_held(&rdp->nocb_lock); rdp 1651 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, rdp 1653 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags); rdp 1656 kernel/rcu/tree_plugin.h del_timer(&rdp->nocb_timer); rdp 1657 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags); rdp 1662 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake")); rdp 1673 kernel/rcu/tree_plugin.h static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype, rdp 1676 kernel/rcu/tree_plugin.h if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) rdp 1677 kernel/rcu/tree_plugin.h mod_timer(&rdp->nocb_timer, jiffies + 1); rdp 1678 kernel/rcu/tree_plugin.h if (rdp->nocb_defer_wakeup < waketype) rdp 1679 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_defer_wakeup, waketype); rdp 1680 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason); rdp 1691 kernel/rcu/tree_plugin.h static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, rdp 1696 kernel/rcu/tree_plugin.h WARN_ON_ONCE(!rcu_segcblist_is_offloaded(&rdp->cblist)); rdp 1697 kernel/rcu/tree_plugin.h rcu_lockdep_assert_cblist_protected(rdp); rdp 1698 kernel/rcu/tree_plugin.h lockdep_assert_held(&rdp->nocb_bypass_lock); rdp 1699 kernel/rcu/tree_plugin.h if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) { rdp 1700 kernel/rcu/tree_plugin.h raw_spin_unlock(&rdp->nocb_bypass_lock); rdp 1705 kernel/rcu/tree_plugin.h rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. 
*/ rdp 1706 kernel/rcu/tree_plugin.h rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp); rdp 1707 kernel/rcu/tree_plugin.h rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl); rdp 1708 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_bypass_first, j); rdp 1709 kernel/rcu/tree_plugin.h rcu_nocb_bypass_unlock(rdp); rdp 1721 kernel/rcu/tree_plugin.h static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, rdp 1724 kernel/rcu/tree_plugin.h if (!rcu_segcblist_is_offloaded(&rdp->cblist)) rdp 1726 kernel/rcu/tree_plugin.h rcu_lockdep_assert_cblist_protected(rdp); rdp 1727 kernel/rcu/tree_plugin.h rcu_nocb_bypass_lock(rdp); rdp 1728 kernel/rcu/tree_plugin.h return rcu_nocb_do_flush_bypass(rdp, rhp, j); rdp 1735 kernel/rcu/tree_plugin.h static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j) rdp 1737 kernel/rcu/tree_plugin.h rcu_lockdep_assert_cblist_protected(rdp); rdp 1738 kernel/rcu/tree_plugin.h if (!rcu_segcblist_is_offloaded(&rdp->cblist) || rdp 1739 kernel/rcu/tree_plugin.h !rcu_nocb_bypass_trylock(rdp)) rdp 1741 kernel/rcu/tree_plugin.h WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j)); rdp 1762 kernel/rcu/tree_plugin.h static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, rdp 1768 kernel/rcu/tree_plugin.h long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); rdp 1770 kernel/rcu/tree_plugin.h if (!rcu_segcblist_is_offloaded(&rdp->cblist)) { rdp 1771 kernel/rcu/tree_plugin.h *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); rdp 1778 kernel/rcu/tree_plugin.h rcu_nocb_lock(rdp); rdp 1779 kernel/rcu/tree_plugin.h WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); rdp 1780 kernel/rcu/tree_plugin.h *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); rdp 1786 kernel/rcu/tree_plugin.h if (j == rdp->nocb_nobypass_last) { rdp 1787 kernel/rcu/tree_plugin.h c = rdp->nocb_nobypass_count + 1; rdp 1789 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_nobypass_last, j); rdp 1790 kernel/rcu/tree_plugin.h c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy; rdp 1791 kernel/rcu/tree_plugin.h if (ULONG_CMP_LT(rdp->nocb_nobypass_count, rdp 1797 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_nobypass_count, c); rdp 1802 kernel/rcu/tree_plugin.h if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) { rdp 1803 kernel/rcu/tree_plugin.h rcu_nocb_lock(rdp); rdp 1804 kernel/rcu/tree_plugin.h *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); rdp 1806 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, rdp 1808 kernel/rcu/tree_plugin.h WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j)); rdp 1809 kernel/rcu/tree_plugin.h WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); rdp 1815 kernel/rcu/tree_plugin.h if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) || rdp 1817 kernel/rcu/tree_plugin.h rcu_nocb_lock(rdp); rdp 1818 kernel/rcu/tree_plugin.h if (!rcu_nocb_flush_bypass(rdp, rhp, j)) { rdp 1819 kernel/rcu/tree_plugin.h *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist); rdp 1821 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, rdp 1823 kernel/rcu/tree_plugin.h WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass)); rdp 1826 kernel/rcu/tree_plugin.h if (j != rdp->nocb_gp_adv_time && rdp 1827 kernel/rcu/tree_plugin.h rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && rdp 1828 kernel/rcu/tree_plugin.h rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { rdp 1829 kernel/rcu/tree_plugin.h rcu_advance_cbs_nowake(rdp->mynode, rdp); rdp 1830 kernel/rcu/tree_plugin.h rdp->nocb_gp_adv_time = j; rdp 1832 
kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags); rdp 1837 kernel/rcu/tree_plugin.h rcu_nocb_wait_contended(rdp); rdp 1838 kernel/rcu/tree_plugin.h rcu_nocb_bypass_lock(rdp); rdp 1839 kernel/rcu/tree_plugin.h ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass); rdp 1840 kernel/rcu/tree_plugin.h rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */ rdp 1841 kernel/rcu/tree_plugin.h rcu_cblist_enqueue(&rdp->nocb_bypass, rhp); rdp 1843 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_bypass_first, j); rdp 1844 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ")); rdp 1846 kernel/rcu/tree_plugin.h rcu_nocb_bypass_unlock(rdp); rdp 1852 kernel/rcu/tree_plugin.h rcu_nocb_lock(rdp); // Rare during call_rcu() flood. rdp 1853 kernel/rcu/tree_plugin.h if (!rcu_segcblist_pend_cbs(&rdp->cblist)) { rdp 1854 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, rdp 1856 kernel/rcu/tree_plugin.h __call_rcu_nocb_wake(rdp, true, flags); rdp 1858 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, rdp 1860 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags); rdp 1872 kernel/rcu/tree_plugin.h static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone, rdp 1874 kernel/rcu/tree_plugin.h __releases(rdp->nocb_lock) rdp 1882 kernel/rcu/tree_plugin.h t = READ_ONCE(rdp->nocb_gp_kthread); rdp 1884 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, rdp 1886 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags); rdp 1890 kernel/rcu/tree_plugin.h len = rcu_segcblist_n_cbs(&rdp->cblist); rdp 1892 kernel/rcu/tree_plugin.h rdp->qlen_last_fqs_check = len; rdp 1895 kernel/rcu/tree_plugin.h wake_nocb_gp(rdp, false, flags); rdp 1896 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, rdp 1899 kernel/rcu/tree_plugin.h wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE, rdp 1901 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags); rdp 1903 kernel/rcu/tree_plugin.h } else if (len > rdp->qlen_last_fqs_check + qhimark) { rdp 1905 kernel/rcu/tree_plugin.h rdp->qlen_last_fqs_check = len; rdp 1907 kernel/rcu/tree_plugin.h if (j != rdp->nocb_gp_adv_time && rdp 1908 kernel/rcu/tree_plugin.h rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) && rdp 1909 kernel/rcu/tree_plugin.h rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) { rdp 1910 kernel/rcu/tree_plugin.h rcu_advance_cbs_nowake(rdp->mynode, rdp); rdp 1911 kernel/rcu/tree_plugin.h rdp->nocb_gp_adv_time = j; rdp 1914 kernel/rcu/tree_plugin.h if ((rdp->nocb_cb_sleep || rdp 1915 kernel/rcu/tree_plugin.h !rcu_segcblist_ready_cbs(&rdp->cblist)) && rdp 1916 kernel/rcu/tree_plugin.h !timer_pending(&rdp->nocb_bypass_timer)) rdp 1917 kernel/rcu/tree_plugin.h wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE, rdp 1919 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags); rdp 1921 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot")); rdp 1922 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags); rdp 1931 kernel/rcu/tree_plugin.h struct rcu_data *rdp = from_timer(rdp, t, nocb_bypass_timer); rdp 1933 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer")); rdp 1934 kernel/rcu/tree_plugin.h rcu_nocb_lock_irqsave(rdp, flags); rdp 1936 kernel/rcu/tree_plugin.h __call_rcu_nocb_wake(rdp, true, flags); rdp 1955 kernel/rcu/tree_plugin.h struct rcu_data *rdp; rdp 1964 kernel/rcu/tree_plugin.h for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) { rdp 1965 
kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
rdp 1966 kernel/rcu/tree_plugin.h rcu_nocb_lock_irqsave(rdp, flags);
rdp 1967 kernel/rcu/tree_plugin.h bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
rdp 1969 kernel/rcu/tree_plugin.h (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
rdp 1972 kernel/rcu/tree_plugin.h (void)rcu_nocb_try_flush_bypass(rdp, j);
rdp 1973 kernel/rcu/tree_plugin.h bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
rdp 1974 kernel/rcu/tree_plugin.h } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
rdp 1975 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags);
rdp 1979 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
rdp 1983 kernel/rcu/tree_plugin.h rnp = rdp->mynode;
rdp 1991 kernel/rcu/tree_plugin.h if (!rcu_segcblist_restempty(&rdp->cblist,
rdp 1993 kernel/rcu/tree_plugin.h (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
rdp 1996 kernel/rcu/tree_plugin.h needwake_gp = rcu_advance_cbs(rnp, rdp);
rdp 2000 kernel/rcu/tree_plugin.h WARN_ON_ONCE(!rcu_segcblist_restempty(&rdp->cblist,
rdp 2002 kernel/rcu/tree_plugin.h if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
rdp 2007 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
rdp 2010 kernel/rcu/tree_plugin.h if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
rdp 2011 kernel/rcu/tree_plugin.h needwake = rdp->nocb_cb_sleep;
rdp 2012 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_cb_sleep, false);
rdp 2017 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags);
rdp 2019 kernel/rcu/tree_plugin.h swake_up_one(&rdp->nocb_cb_wq);
rdp 2077 kernel/rcu/tree_plugin.h struct rcu_data *rdp = arg;
rdp 2080 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
rdp 2081 kernel/rcu/tree_plugin.h nocb_gp_wait(rdp);
rdp 2091 kernel/rcu/tree_plugin.h static void nocb_cb_wait(struct rcu_data *rdp)
rdp 2096 kernel/rcu/tree_plugin.h struct rcu_node *rnp = rdp->mynode;
rdp 2102 kernel/rcu/tree_plugin.h rcu_do_batch(rdp);
rdp 2105 kernel/rcu/tree_plugin.h rcu_nocb_lock_irqsave(rdp, flags);
rdp 2106 kernel/rcu/tree_plugin.h if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
rdp 2109 kernel/rcu/tree_plugin.h needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
rdp 2112 kernel/rcu/tree_plugin.h if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
rdp 2113 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags);
rdp 2119 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
rdp 2120 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_cb_sleep, true);
rdp 2121 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags);
rdp 2124 kernel/rcu/tree_plugin.h swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
rdp 2125 kernel/rcu/tree_plugin.h !READ_ONCE(rdp->nocb_cb_sleep));
rdp 2126 kernel/rcu/tree_plugin.h if (!smp_load_acquire(&rdp->nocb_cb_sleep)) { /* VVV */
rdp 2131 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
rdp 2140 kernel/rcu/tree_plugin.h struct rcu_data *rdp = arg;
rdp 2145 kernel/rcu/tree_plugin.h nocb_cb_wait(rdp);
rdp 2152 kernel/rcu/tree_plugin.h static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
rdp 2154 kernel/rcu/tree_plugin.h return READ_ONCE(rdp->nocb_defer_wakeup);
rdp 2158 kernel/rcu/tree_plugin.h static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
rdp 2163 kernel/rcu/tree_plugin.h rcu_nocb_lock_irqsave(rdp, flags);
rdp 2164 kernel/rcu/tree_plugin.h if (!rcu_nocb_need_deferred_wakeup(rdp)) {
rdp 2165 kernel/rcu/tree_plugin.h rcu_nocb_unlock_irqrestore(rdp, flags);
rdp 2168 kernel/rcu/tree_plugin.h ndw = READ_ONCE(rdp->nocb_defer_wakeup);
rdp 2169 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
rdp 2170 kernel/rcu/tree_plugin.h wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
rdp 2171 kernel/rcu/tree_plugin.h trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
rdp 2177 kernel/rcu/tree_plugin.h struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
rdp 2179 kernel/rcu/tree_plugin.h do_nocb_deferred_wakeup_common(rdp);
rdp 2187 kernel/rcu/tree_plugin.h static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
rdp 2189 kernel/rcu/tree_plugin.h if (rcu_nocb_need_deferred_wakeup(rdp))
rdp 2190 kernel/rcu/tree_plugin.h do_nocb_deferred_wakeup_common(rdp);
rdp 2197 kernel/rcu/tree_plugin.h struct rcu_data *rdp;
rdp 2232 kernel/rcu/tree_plugin.h rdp = per_cpu_ptr(&rcu_data, cpu);
rdp 2233 kernel/rcu/tree_plugin.h if (rcu_segcblist_empty(&rdp->cblist))
rdp 2234 kernel/rcu/tree_plugin.h rcu_segcblist_init(&rdp->cblist);
rdp 2235 kernel/rcu/tree_plugin.h rcu_segcblist_offload(&rdp->cblist);
rdp 2241 kernel/rcu/tree_plugin.h static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
rdp 2243 kernel/rcu/tree_plugin.h init_swait_queue_head(&rdp->nocb_cb_wq);
rdp 2244 kernel/rcu/tree_plugin.h init_swait_queue_head(&rdp->nocb_gp_wq);
rdp 2245 kernel/rcu/tree_plugin.h raw_spin_lock_init(&rdp->nocb_lock);
rdp 2246 kernel/rcu/tree_plugin.h raw_spin_lock_init(&rdp->nocb_bypass_lock);
rdp 2247 kernel/rcu/tree_plugin.h raw_spin_lock_init(&rdp->nocb_gp_lock);
rdp 2248 kernel/rcu/tree_plugin.h timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
rdp 2249 kernel/rcu/tree_plugin.h timer_setup(&rdp->nocb_bypass_timer, do_nocb_bypass_wakeup_timer, 0);
rdp 2250 kernel/rcu/tree_plugin.h rcu_cblist_init(&rdp->nocb_bypass);
rdp 2260 kernel/rcu/tree_plugin.h struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
rdp 2268 kernel/rcu/tree_plugin.h if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
rdp 2272 kernel/rcu/tree_plugin.h rdp_gp = rdp->nocb_gp_rdp;
rdp 2282 kernel/rcu/tree_plugin.h t = kthread_run(rcu_nocb_cb_kthread, rdp,
rdp 2286 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_cb_kthread, t);
rdp 2287 kernel/rcu/tree_plugin.h WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
rdp 2329 kernel/rcu/tree_plugin.h struct rcu_data *rdp;
rdp 2346 kernel/rcu/tree_plugin.h rdp = per_cpu_ptr(&rcu_data, cpu);
rdp 2347 kernel/rcu/tree_plugin.h if (rdp->cpu >= nl) {
rdp 2350 kernel/rcu/tree_plugin.h nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
rdp 2351 kernel/rcu/tree_plugin.h rdp->nocb_gp_rdp = rdp;
rdp 2352 kernel/rcu/tree_plugin.h rdp_gp = rdp;
rdp 2365 kernel/rcu/tree_plugin.h rdp->nocb_gp_rdp = rdp_gp;
rdp 2366 kernel/rcu/tree_plugin.h rdp_prev->nocb_next_cb_rdp = rdp;
rdp 2370 kernel/rcu/tree_plugin.h rdp_prev = rdp;
rdp 2391 kernel/rcu/tree_plugin.h static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
rdp 2393 kernel/rcu/tree_plugin.h struct rcu_node *rnp = rdp->mynode;
rdp 2396 kernel/rcu/tree_plugin.h rdp->cpu,
rdp 2397 kernel/rcu/tree_plugin.h "kK"[!!rdp->nocb_gp_kthread],
rdp 2398 kernel/rcu/tree_plugin.h "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
rdp 2399 kernel/rcu/tree_plugin.h "dD"[!!rdp->nocb_defer_wakeup],
rdp 2400 kernel/rcu/tree_plugin.h "tT"[timer_pending(&rdp->nocb_timer)],
rdp 2401 kernel/rcu/tree_plugin.h "bB"[timer_pending(&rdp->nocb_bypass_timer)],
rdp 2402 kernel/rcu/tree_plugin.h "sS"[!!rdp->nocb_gp_sleep],
rdp 2403 kernel/rcu/tree_plugin.h ".W"[swait_active(&rdp->nocb_gp_wq)],
rdp 2406 kernel/rcu/tree_plugin.h ".B"[!!rdp->nocb_gp_bypass],
rdp 2407 kernel/rcu/tree_plugin.h ".G"[!!rdp->nocb_gp_gp],
rdp 2408 kernel/rcu/tree_plugin.h (long)rdp->nocb_gp_seq,
rdp 2409 kernel/rcu/tree_plugin.h rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops));
rdp 2413 kernel/rcu/tree_plugin.h static void show_rcu_nocb_state(struct rcu_data *rdp)
rdp 2415 kernel/rcu/tree_plugin.h struct rcu_segcblist *rsclp = &rdp->cblist;
rdp 2420 kernel/rcu/tree_plugin.h if (rdp->nocb_gp_rdp == rdp)
rdp 2421 kernel/rcu/tree_plugin.h show_rcu_nocb_gp_state(rdp);
rdp 2424 kernel/rcu/tree_plugin.h rdp->cpu, rdp->nocb_gp_rdp->cpu,
rdp 2425 kernel/rcu/tree_plugin.h "kK"[!!rdp->nocb_cb_kthread],
rdp 2426 kernel/rcu/tree_plugin.h "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
rdp 2427 kernel/rcu/tree_plugin.h "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
rdp 2428 kernel/rcu/tree_plugin.h "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
rdp 2429 kernel/rcu/tree_plugin.h "sS"[!!rdp->nocb_cb_sleep],
rdp 2430 kernel/rcu/tree_plugin.h ".W"[swait_active(&rdp->nocb_cb_wq)],
rdp 2431 kernel/rcu/tree_plugin.h jiffies - rdp->nocb_bypass_first,
rdp 2432 kernel/rcu/tree_plugin.h jiffies - rdp->nocb_nobypass_last,
rdp 2433 kernel/rcu/tree_plugin.h rdp->nocb_nobypass_count,
rdp 2438 kernel/rcu/tree_plugin.h ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
rdp 2439 kernel/rcu/tree_plugin.h rcu_segcblist_n_cbs(&rdp->cblist));
rdp 2442 kernel/rcu/tree_plugin.h if (rdp->nocb_gp_rdp == rdp)
rdp 2445 kernel/rcu/tree_plugin.h waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
rdp 2446 kernel/rcu/tree_plugin.h wastimer = timer_pending(&rdp->nocb_timer);
rdp 2447 kernel/rcu/tree_plugin.h wassleep = swait_active(&rdp->nocb_gp_wq);
rdp 2448 kernel/rcu/tree_plugin.h if (!rdp->nocb_defer_wakeup && !rdp->nocb_gp_sleep &&
rdp 2454 kernel/rcu/tree_plugin.h "dD"[!!rdp->nocb_defer_wakeup],
rdp 2456 kernel/rcu/tree_plugin.h "sS"[!!rdp->nocb_gp_sleep],
rdp 2463 kernel/rcu/tree_plugin.h static void rcu_nocb_lock(struct rcu_data *rdp)
rdp 2468 kernel/rcu/tree_plugin.h static void rcu_nocb_unlock(struct rcu_data *rdp)
rdp 2473 kernel/rcu/tree_plugin.h static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
rdp 2480 kernel/rcu/tree_plugin.h static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
rdp 2498 kernel/rcu/tree_plugin.h static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
rdp 2504 kernel/rcu/tree_plugin.h static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
rdp 2510 kernel/rcu/tree_plugin.h static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
rdp 2516 kernel/rcu/tree_plugin.h static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
rdp 2520 kernel/rcu/tree_plugin.h static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
rdp 2525 kernel/rcu/tree_plugin.h static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
rdp 2537 kernel/rcu/tree_plugin.h static void show_rcu_nocb_state(struct rcu_data *rdp)
rdp 114 kernel/rcu/tree_stall.h static void zero_cpu_stall_ticks(struct rcu_data *rdp)
rdp 116 kernel/rcu/tree_stall.h rdp->ticks_this_gp = 0;
rdp 117 kernel/rcu/tree_stall.h rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
rdp 118 kernel/rcu/tree_stall.h WRITE_ONCE(rdp->last_fqs_resched, jiffies);
rdp 149 kernel/rcu/tree_stall.h struct rcu_data *rdp;
rdp 152 kernel/rcu/tree_stall.h rdp = container_of(iwp, struct rcu_data, rcu_iw);
rdp 153 kernel/rcu/tree_stall.h rnp = rdp->mynode;
rdp 155 kernel/rcu/tree_stall.h if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
rdp 156 kernel/rcu/tree_stall.h rdp->rcu_iw_gp_seq = rnp->gp_seq;
rdp 157 kernel/rcu/tree_stall.h rdp->rcu_iw_pending = false;
rdp 264 kernel/rcu/tree_stall.h struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
rdp 267 kernel/rcu/tree_stall.h rdp->last_accelerate & 0xffff, jiffies & 0xffff,
rdp 268 kernel/rcu/tree_stall.h ".l"[rdp->all_lazy],
rdp 269 kernel/rcu/tree_stall.h ".L"[!rcu_segcblist_n_nonlazy_cbs(&rdp->cblist)],
rdp 270 kernel/rcu/tree_stall.h ".D"[!!rdp->tick_nohz_enabled_snap]);
rdp 297 kernel/rcu/tree_stall.h struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
rdp 307 kernel/rcu/tree_stall.h ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
rdp 312 kernel/rcu/tree_stall.h ticks_value = rdp->ticks_this_gp;
rdp 315 kernel/rcu/tree_stall.h delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
rdp 319 kernel/rcu/tree_stall.h "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
rdp 320 kernel/rcu/tree_stall.h "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
rdp 322 kernel/rcu/tree_stall.h rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
rdp 325 kernel/rcu/tree_stall.h rcu_dynticks_snap(rdp) & 0xfff,
rdp 326 kernel/rcu/tree_stall.h rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
rdp 327 kernel/rcu/tree_stall.h rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
rdp 429 kernel/rcu/tree_stall.h struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
rdp 444 kernel/rcu/tree_stall.h raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
rdp 446 kernel/rcu/tree_stall.h raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
rdp 477 kernel/rcu/tree_stall.h static void check_cpu_stall(struct rcu_data *rdp)
rdp 522 kernel/rcu/tree_stall.h rnp = rdp->mynode;
rdp 525 kernel/rcu/tree_stall.h (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
rdp 559 kernel/rcu/tree_stall.h struct rcu_data *rdp;
rdp 583 kernel/rcu/tree_stall.h rdp = per_cpu_ptr(&rcu_data, cpu);
rdp 584 kernel/rcu/tree_stall.h if (rdp->gpwrap ||
rdp 586 kernel/rcu/tree_stall.h rdp->gp_seq_needed))
rdp 589 kernel/rcu/tree_stall.h cpu, (long)rdp->gp_seq_needed);
rdp 593 kernel/rcu/tree_stall.h rdp = per_cpu_ptr(&rcu_data, cpu);
rdp 594 kernel/rcu/tree_stall.h if (rcu_segcblist_is_offloaded(&rdp->cblist))
rdp 595 kernel/rcu/tree_stall.h show_rcu_nocb_state(rdp);
rdp 605 kernel/rcu/tree_stall.h static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
rdp 666 kernel/rcu/tree_stall.h struct rcu_data *rdp;
rdp 676 kernel/rcu/tree_stall.h rdp = this_cpu_ptr(&rcu_data);
rdp 677 kernel/rcu/tree_stall.h rcu_check_gp_start_stall(rdp->mynode, rdp, j);
rdp 250 sound/pcmcia/pdaudiocf/pdaudiocf_irq.c int size, off, cont, rdp, wdp;
rdp 258 sound/pcmcia/pdaudiocf/pdaudiocf_irq.c rdp = inw(chip->port + PDAUDIOCF_REG_RDP);
rdp 261 sound/pcmcia/pdaudiocf/pdaudiocf_irq.c size = wdp - rdp;