rcpu               51 arch/arm/mach-tegra/sleep.h .macro cpu_to_halt_reg rd, rcpu
rcpu               60 arch/arm/mach-tegra/sleep.h .macro cpu_to_csr_reg rd, rcpu
rcpu              214 drivers/macintosh/rack-meter.c 	struct rackmeter_cpu *rcpu =
rcpu              216 drivers/macintosh/rack-meter.c 	struct rackmeter *rm = rcpu->rm;
rcpu              223 drivers/macintosh/rack-meter.c 	total_nsecs = cur_nsecs - rcpu->prev_wall;
rcpu              224 drivers/macintosh/rack-meter.c 	rcpu->prev_wall = cur_nsecs;
rcpu              227 drivers/macintosh/rack-meter.c 	idle_nsecs = total_idle_nsecs - rcpu->prev_idle;
rcpu              229 drivers/macintosh/rack-meter.c 	rcpu->prev_idle = total_idle_nsecs;
rcpu              243 drivers/macintosh/rack-meter.c 	rcpu->zero = (cumm == 0);
rcpu              253 drivers/macintosh/rack-meter.c 	schedule_delayed_work_on(cpu, &rcpu->sniffer,
rcpu              273 drivers/macintosh/rack-meter.c 		struct rackmeter_cpu *rcpu;
rcpu              277 drivers/macintosh/rack-meter.c 		rcpu = &rm->cpu[cpu];
rcpu              278 drivers/macintosh/rack-meter.c 		rcpu->prev_idle = get_cpu_idle_time(cpu);
rcpu              279 drivers/macintosh/rack-meter.c 		rcpu->prev_wall = jiffies64_to_nsecs(get_jiffies_64());
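The rack-meter.c entries above keep per-CPU rackmeter_cpu state and work with deltas against the previous sample (rcpu->prev_wall, rcpu->prev_idle) to derive a load figure. Below is a minimal userspace sketch of that delta arithmetic only; sample_state and busy_percent are hypothetical names, not the driver's code, and the numbers fed in from main are made-up inputs for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-CPU sample state, mirroring rcpu->prev_idle / rcpu->prev_wall. */
struct sample_state {
	uint64_t prev_idle_nsecs;
	uint64_t prev_wall_nsecs;
};

/*
 * Busy percentage over the interval since the previous call, given the
 * cumulative idle time and the current wall clock, both in nanoseconds.
 */
unsigned int busy_percent(struct sample_state *s,
			  uint64_t total_idle_nsecs, uint64_t cur_nsecs)
{
	uint64_t total_nsecs = cur_nsecs - s->prev_wall_nsecs;
	uint64_t idle_nsecs = total_idle_nsecs - s->prev_idle_nsecs;

	s->prev_wall_nsecs = cur_nsecs;
	s->prev_idle_nsecs = total_idle_nsecs;

	if (total_nsecs == 0)
		return 0;
	if (idle_nsecs > total_nsecs)	/* guard against sampling skew */
		idle_nsecs = total_nsecs;

	return (unsigned int)(100 - (idle_nsecs * 100) / total_nsecs);
}

int main(void)
{
	struct sample_state s = { 0, 0 };

	/* Pretend 1 s of wall time passed with 250 ms of it idle: prints 75. */
	printf("%u%% busy\n", busy_percent(&s, 250000000ULL, 1000000000ULL));
	return 0;
}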
rcpu              730 include/linux/bpf.h int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
rcpu              847 include/linux/bpf.h static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
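The two bpf.h entries above are the cpu_map_enqueue() prototype and the opening of an inline fallback a few hundred lines later, the usual kernel pattern for letting callers compile when a feature is configured out. A generic sketch of that pattern follows; CONFIG_FOO, foo_entry and foo_enqueue are hypothetical names, and the stub body is an assumption, not the exact bpf.h text.

struct foo_entry;	/* opaque to callers */

#ifdef CONFIG_FOO
/* Real implementation lives in the feature's .c file. */
int foo_enqueue(struct foo_entry *e, void *buf);
#else
/* Feature compiled out: callers get a harmless no-op. */
static inline int foo_enqueue(struct foo_entry *e, void *buf)
{
	return 0;
}
#endif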
rcpu              142 kernel/bpf/cpumap.c static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
rcpu              144 kernel/bpf/cpumap.c 	atomic_inc(&rcpu->refcnt);
rcpu              150 kernel/bpf/cpumap.c 	struct bpf_cpu_map_entry *rcpu;
rcpu              152 kernel/bpf/cpumap.c 	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
rcpu              160 kernel/bpf/cpumap.c 	kthread_stop(rcpu->kthread);
rcpu              163 kernel/bpf/cpumap.c static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
rcpu              236 kernel/bpf/cpumap.c static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
rcpu              238 kernel/bpf/cpumap.c 	if (atomic_dec_and_test(&rcpu->refcnt)) {
rcpu              240 kernel/bpf/cpumap.c 		__cpu_map_ring_cleanup(rcpu->queue);
rcpu              241 kernel/bpf/cpumap.c 		ptr_ring_cleanup(rcpu->queue, NULL);
rcpu              242 kernel/bpf/cpumap.c 		kfree(rcpu->queue);
rcpu              243 kernel/bpf/cpumap.c 		kfree(rcpu);
rcpu              251 kernel/bpf/cpumap.c 	struct bpf_cpu_map_entry *rcpu = data;
rcpu              260 kernel/bpf/cpumap.c 	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
rcpu              268 kernel/bpf/cpumap.c 		if (__ptr_ring_empty(rcpu->queue)) {
rcpu              271 kernel/bpf/cpumap.c 			if (__ptr_ring_empty(rcpu->queue)) {
rcpu              286 kernel/bpf/cpumap.c 		n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH);
rcpu              312 kernel/bpf/cpumap.c 			skb = cpu_map_build_skb(rcpu, xdpf, skb);
rcpu              324 kernel/bpf/cpumap.c 		trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched);
rcpu              330 kernel/bpf/cpumap.c 	put_cpu_map_entry(rcpu);
rcpu              338 kernel/bpf/cpumap.c 	struct bpf_cpu_map_entry *rcpu;
rcpu              345 kernel/bpf/cpumap.c 	rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa);
rcpu              346 kernel/bpf/cpumap.c 	if (!rcpu)
rcpu              350 kernel/bpf/cpumap.c 	rcpu->bulkq = __alloc_percpu_gfp(sizeof(*rcpu->bulkq),
rcpu              352 kernel/bpf/cpumap.c 	if (!rcpu->bulkq)
rcpu              356 kernel/bpf/cpumap.c 		bq = per_cpu_ptr(rcpu->bulkq, i);
rcpu              357 kernel/bpf/cpumap.c 		bq->obj = rcpu;
rcpu              361 kernel/bpf/cpumap.c 	rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa);
rcpu              362 kernel/bpf/cpumap.c 	if (!rcpu->queue)
rcpu              365 kernel/bpf/cpumap.c 	err = ptr_ring_init(rcpu->queue, qsize, gfp);
rcpu              369 kernel/bpf/cpumap.c 	rcpu->cpu    = cpu;
rcpu              370 kernel/bpf/cpumap.c 	rcpu->map_id = map_id;
rcpu              371 kernel/bpf/cpumap.c 	rcpu->qsize  = qsize;
rcpu              374 kernel/bpf/cpumap.c 	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
rcpu              376 kernel/bpf/cpumap.c 	if (IS_ERR(rcpu->kthread))
rcpu              379 kernel/bpf/cpumap.c 	get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
rcpu              380 kernel/bpf/cpumap.c 	get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */
rcpu              383 kernel/bpf/cpumap.c 	kthread_bind(rcpu->kthread, cpu);
rcpu              384 kernel/bpf/cpumap.c 	wake_up_process(rcpu->kthread);
rcpu              386 kernel/bpf/cpumap.c 	return rcpu;
rcpu              389 kernel/bpf/cpumap.c 	ptr_ring_cleanup(rcpu->queue, NULL);
rcpu              391 kernel/bpf/cpumap.c 	kfree(rcpu->queue);
rcpu              393 kernel/bpf/cpumap.c 	free_percpu(rcpu->bulkq);
rcpu              395 kernel/bpf/cpumap.c 	kfree(rcpu);
rcpu              401 kernel/bpf/cpumap.c 	struct bpf_cpu_map_entry *rcpu;
rcpu              409 kernel/bpf/cpumap.c 	rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);
rcpu              413 kernel/bpf/cpumap.c 		struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
rcpu              418 kernel/bpf/cpumap.c 	free_percpu(rcpu->bulkq);
rcpu              420 kernel/bpf/cpumap.c 	put_cpu_map_entry(rcpu);
rcpu              443 kernel/bpf/cpumap.c 				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
rcpu              447 kernel/bpf/cpumap.c 	old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
rcpu              472 kernel/bpf/cpumap.c 	struct bpf_cpu_map_entry *rcpu;
rcpu              493 kernel/bpf/cpumap.c 		rcpu = NULL; /* Same as deleting */
rcpu              496 kernel/bpf/cpumap.c 		rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id);
rcpu              497 kernel/bpf/cpumap.c 		if (!rcpu)
rcpu              499 kernel/bpf/cpumap.c 		rcpu->cmap = cmap;
rcpu              502 kernel/bpf/cpumap.c 	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
rcpu              541 kernel/bpf/cpumap.c 		struct bpf_cpu_map_entry *rcpu;
rcpu              543 kernel/bpf/cpumap.c 		rcpu = READ_ONCE(cmap->cpu_map[i]);
rcpu              544 kernel/bpf/cpumap.c 		if (!rcpu)
rcpu              558 kernel/bpf/cpumap.c 	struct bpf_cpu_map_entry *rcpu;
rcpu              563 kernel/bpf/cpumap.c 	rcpu = READ_ONCE(cmap->cpu_map[key]);
rcpu              564 kernel/bpf/cpumap.c 	return rcpu;
rcpu              569 kernel/bpf/cpumap.c 	struct bpf_cpu_map_entry *rcpu =
rcpu              572 kernel/bpf/cpumap.c 	return rcpu ? &rcpu->qsize : NULL;
rcpu              604 kernel/bpf/cpumap.c 	struct bpf_cpu_map_entry *rcpu = bq->obj;
rcpu              606 kernel/bpf/cpumap.c 	const int to_cpu = rcpu->cpu;
rcpu              613 kernel/bpf/cpumap.c 	q = rcpu->queue;
rcpu              636 kernel/bpf/cpumap.c 	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
rcpu              643 kernel/bpf/cpumap.c static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
rcpu              645 kernel/bpf/cpumap.c 	struct list_head *flush_list = this_cpu_ptr(rcpu->cmap->flush_list);
rcpu              646 kernel/bpf/cpumap.c 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
rcpu              668 kernel/bpf/cpumap.c int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
rcpu              680 kernel/bpf/cpumap.c 	bq_enqueue(rcpu, xdpf);
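The cpumap.c entries above show the bpf_cpu_map_entry lifecycle: __cpu_map_entry_alloc() takes two references (one for the cmap->cpu_map[] slot, one for the service kthread), get_cpu_map_entry()/put_cpu_map_entry() adjust an atomic refcount, and the final put tears down the ptr_ring queue and frees the entry. Here is a compact userspace analogy of that two-owner refcount pattern using C11 atomics; struct entry and the entry_* helpers are hypothetical stand-ins, not the kernel code.

#include <stdatomic.h>
#include <stdlib.h>

/* Simplified stand-in for bpf_cpu_map_entry: just a refcount and a queue. */
struct entry {
	atomic_int refcnt;
	void *queue;
};

static void entry_get(struct entry *e)
{
	atomic_fetch_add(&e->refcnt, 1);	/* cf. get_cpu_map_entry() */
}

static void entry_put(struct entry *e)
{
	/* Last reference frees the queue and the entry, cf. put_cpu_map_entry(). */
	if (atomic_fetch_sub(&e->refcnt, 1) == 1) {
		free(e->queue);
		free(e);
	}
}

static struct entry *entry_alloc(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	atomic_init(&e->refcnt, 0);
	/* Two initial references, as in __cpu_map_entry_alloc(): */
	entry_get(e);	/* held by the map slot */
	entry_get(e);	/* held by the kthread */
	return e;
}

int main(void)
{
	struct entry *e = entry_alloc();

	if (!e)
		return 1;
	entry_put(e);	/* kthread exits and drops its reference */
	entry_put(e);	/* slot is replaced/deleted; last put frees e */
	return 0;
}

Splitting ownership this way is what lets __cpu_map_entry_replace() swap the slot with xchg while the kthread keeps draining its ring (the "!kthread_should_stop() || !__ptr_ring_empty()" loop condition above) before the memory is released.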
rcpu             3532 net/core/filter.c 		struct bpf_cpu_map_entry *rcpu = fwd;
rcpu             3534 net/core/filter.c 		err = cpu_map_enqueue(rcpu, xdp, dev_rx);
rcpu              108 samples/bpf/cpustat_user.c 	int rcpu, i, ret;
rcpu              116 samples/bpf/cpustat_user.c 	rcpu = sched_getcpu();
rcpu              117 samples/bpf/cpustat_user.c 	if (rcpu < 0)
rcpu              126 samples/bpf/cpustat_user.c 		if (i == rcpu)
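The cpustat_user.c entries read the CPU the program itself runs on via sched_getcpu() and later test "i == rcpu" inside a loop. A self-contained sketch of the same idea follows; it is a hypothetical program, not the sample itself, and it assumes the sample's loop skips the current CPU at that check.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int rcpu = sched_getcpu();	/* CPU this thread is currently on */
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);

	if (rcpu < 0) {
		perror("sched_getcpu");
		return 1;
	}

	/* Walk the online CPUs, leaving out the one running this program. */
	for (long i = 0; i < ncpus; i++) {
		if (i == rcpu)
			continue;
		printf("would act on CPU %ld\n", i);
	}
	return 0;
}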