rcl 1017 arch/x86/kvm/emulate.c FASTOP2CL(rcl);
rcl 50 drivers/gpu/drm/vc4/vc4_render_cl.c struct drm_gem_cma_object *rcl;
rcl 58 drivers/gpu/drm/vc4/vc4_render_cl.c *(u8 *)(setup->rcl->vaddr + setup->next_offset) = val;
rcl 64 drivers/gpu/drm/vc4/vc4_render_cl.c *(u16 *)(setup->rcl->vaddr + setup->next_offset) = val;
rcl 70 drivers/gpu/drm/vc4/vc4_render_cl.c *(u32 *)(setup->rcl->vaddr + setup->next_offset) = val;
rcl 331 drivers/gpu/drm/vc4/vc4_render_cl.c setup->rcl = &vc4_bo_create(dev, size, true, VC4_BO_TYPE_RCL)->base;
rcl 332 drivers/gpu/drm/vc4/vc4_render_cl.c if (IS_ERR(setup->rcl))
rcl 333 drivers/gpu/drm/vc4/vc4_render_cl.c return PTR_ERR(setup->rcl);
rcl 334 drivers/gpu/drm/vc4/vc4_render_cl.c list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
rcl 377 drivers/gpu/drm/vc4/vc4_render_cl.c exec->ct1ca = setup->rcl->paddr;
rcl 378 drivers/gpu/drm/vc4/vc4_render_cl.c exec->ct1ea = setup->rcl->paddr + setup->next_offset;
rcl 2118 kernel/rcu/tree.c struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
rcl 2149 kernel/rcu/tree.c rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
rcl 2155 kernel/rcu/tree.c rhp = rcu_cblist_dequeue(&rcl);
rcl 2156 kernel/rcu/tree.c for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
rcl 2159 kernel/rcu/tree.c rcu_cblist_dequeued_lazy(&rcl);
rcl 2164 kernel/rcu/tree.c if (-rcl.len >= bl && !offloaded &&
rcl 2170 kernel/rcu/tree.c if (likely((-rcl.len & 31) || local_clock() < tlimit))
rcl 2187 kernel/rcu/tree.c count = -rcl.len;
rcl 2188 kernel/rcu/tree.c trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
rcl 2192 kernel/rcu/tree.c rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
rcl 2194 kernel/rcu/tree.c rcu_segcblist_insert_count(&rdp->cblist, &rcl);
rcl 1694 kernel/rcu/tree_plugin.h struct rcu_cblist rcl;
rcl 1706 kernel/rcu/tree_plugin.h rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
rcl 1707 kernel/rcu/tree_plugin.h rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
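Aside from the emulate.c hit, which wires the x86 rcl (rotate through carry left) instruction into KVM's fastop dispatch, the uses cluster in two subsystems. Read in file order, the vc4_render_cl.c hits outline one pattern: the render command list is allocated as a GEM buffer (line 331), filled through fixed-width emit helpers that store at vaddr + next_offset (lines 58, 64, 70), and its physical extent [paddr, paddr + next_offset) is published to the hardware as the thread-1 command-list bounds ct1ca/ct1ea (lines 377-378). Below is a standalone userspace analog of that append-cursor idiom; every name in it (cl_setup, cl_u8, and so on) is illustrative and not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Analog of the driver's setup state: a CPU mapping plus an append
 * cursor; next_offset doubles as the final command-list size. */
struct cl_setup {
	uint8_t *vaddr;
	size_t next_offset;
};

/* Fixed-width emitters mirroring the u8/u16/u32 stores at lines
 * 58/64/70; memcpy stands in for the kernel's direct unaligned stores. */
static void cl_u8(struct cl_setup *s, uint8_t v)
{
	s->vaddr[s->next_offset] = v;
	s->next_offset += sizeof(v);
}

static void cl_u16(struct cl_setup *s, uint16_t v)
{
	memcpy(s->vaddr + s->next_offset, &v, sizeof(v));
	s->next_offset += sizeof(v);
}

static void cl_u32(struct cl_setup *s, uint32_t v)
{
	memcpy(s->vaddr + s->next_offset, &v, sizeof(v));
	s->next_offset += sizeof(v);
}

int main(void)
{
	struct cl_setup s = { .vaddr = malloc(64), .next_offset = 0 };

	if (!s.vaddr)
		return 1;
	cl_u8(&s, 0x70);        /* e.g. an opcode byte */
	cl_u16(&s, 0x0102);     /* e.g. a coordinate operand */
	cl_u32(&s, 0xdeadbeef); /* e.g. an address operand */
	/* The handoff at lines 377-378 reduces to publishing
	 * [base, base + next_offset) as the list's start/end. */
	printf("command list spans %zu bytes\n", s.next_offset);
	free(s.vaddr);
	return 0;
}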
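The kernel/rcu/tree.c hits all belong to one callback-invocation sequence: an on-stack rcu_cblist starts empty (line 2118), the ready "done" callbacks are extracted onto it (line 2149), a dequeue loop invokes them (lines 2155-2156), the batch and time limits are checked against -rcl.len (lines 2164, 2170; each dequeue decrements len from zero, so -len is the number invoked so far), and any leftovers plus the negative count are spliced back into the per-CPU segmented list (lines 2192, 2194). The tree_plugin.h hits apply the same on-stack-list idiom to flushing the no-CBs bypass list into the pending segment. The sketch below is a self-contained analog of that pattern with illustrative names; it is not the RCU API.

#include <stdio.h>
#include <stddef.h>

struct cb {
	struct cb *next;
	void (*func)(struct cb *);
};

/* Analog of struct rcu_cblist: singly linked with a tail pointer and a
 * length that the dequeue path drives negative, as in the kernel. */
struct cblist {
	struct cb *head;
	struct cb **tail;
	long len;
};

#define CBLIST_INIT(n) { .head = NULL, .tail = &(n).head, .len = 0 }

static struct cb *cblist_dequeue(struct cblist *l)
{
	struct cb *cb = l->head;

	if (!cb)
		return NULL;
	l->len--;			/* counts down: -len = dequeued so far */
	l->head = cb->next;
	if (!l->head)
		l->tail = &l->head;
	return cb;
}

static void cblist_enqueue(struct cblist *l, struct cb *cb)
{
	cb->next = NULL;
	*l->tail = cb;
	l->tail = &cb->next;
	l->len++;
}

static void hello(struct cb *cb) { (void)cb; puts("callback ran"); }

int main(void)
{
	struct cb cbs[5];
	struct cblist pending = CBLIST_INIT(pending);
	struct cblist rcl = CBLIST_INIT(rcl);	/* on-stack work list */
	struct cb *rhp;
	const long bl = 3;			/* batch limit */
	int i;

	for (i = 0; i < 5; i++) {
		cbs[i].func = hello;
		cblist_enqueue(&pending, &cbs[i]);
	}

	/* "Extract": move the list onto the local cblist but leave the
	 * count behind, as the extraction at line 2149 does. */
	rcl.head = pending.head;
	rcl.tail = pending.tail;
	pending.head = NULL;
	pending.tail = &pending.head;

	/* Invoke up to the batch limit, mirroring lines 2155-2164. */
	rhp = cblist_dequeue(&rcl);
	for (; rhp; rhp = cblist_dequeue(&rcl)) {
		rhp->func(rhp);
		if (-rcl.len >= bl)
			break;
	}

	/* "Reinsert": splice leftovers back to the front and fold the
	 * negative count into the donor, mirroring lines 2192 and 2194. */
	if (rcl.head) {
		*rcl.tail = pending.head;
		if (!pending.head)
			pending.tail = rcl.tail;
		pending.head = rcl.head;
	}
	pending.len += rcl.len;	/* rcl.len is negative: subtract invoked */
	printf("%ld callbacks left pending\n", pending.len);
	return 0;
}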