Lines matching refs:pd — references to the identifier pd (struct parallel_data *) in kernel/padata.c
36 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) in padata_index_to_cpu() argument
40 target_cpu = cpumask_first(pd->cpumask.pcpu); in padata_index_to_cpu()
42 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu); in padata_index_to_cpu()
47 static int padata_cpu_hash(struct parallel_data *pd) in padata_cpu_hash() argument
57 seq_nr = atomic_inc_return(&pd->seq_nr); in padata_cpu_hash()
58 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); in padata_cpu_hash()
60 return padata_index_to_cpu(pd, cpu_index); in padata_cpu_hash()
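Lines 36-60 above are the round-robin CPU selection: a sequence number is hashed onto the parallel cpumask and translated into a real CPU id. A minimal reconstruction from the matched lines, assuming the elided loop body and locals follow the stock padata.c of this era:

	static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
	{
		int cpu, target_cpu;

		/* Advance from the first set bit to the cpu_index-th one. */
		target_cpu = cpumask_first(pd->cpumask.pcpu);
		for (cpu = 0; cpu < cpu_index; cpu++)
			target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

		return target_cpu;
	}

	static int padata_cpu_hash(struct parallel_data *pd)
	{
		unsigned int seq_nr;
		int cpu_index;

		/* seq_nr mod the number of parallel CPUs spreads objects round-robin. */
		seq_nr = atomic_inc_return(&pd->seq_nr);
		cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

		return padata_index_to_cpu(pd, cpu_index);
	}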
66 struct parallel_data *pd; in padata_parallel_worker() local
73 pd = pqueue->pd; in padata_parallel_worker()
74 pinst = pd->pinst; in padata_parallel_worker()
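padata_parallel_worker() is the workqueue function that drains one CPU's parallel queue and invokes each object's parallel() callback. A sketch built around the matched lines 66-74; the list splice and callback loop are assumptions from the usual padata pattern:

	static void padata_parallel_worker(struct work_struct *parallel_work)
	{
		struct padata_parallel_queue *pqueue;
		struct parallel_data *pd;
		struct padata_instance *pinst;
		LIST_HEAD(local_list);

		local_bh_disable();
		pqueue = container_of(parallel_work,
				      struct padata_parallel_queue, work);
		pd = pqueue->pd;
		pinst = pd->pinst;

		/* Detach the queued objects so the lock is held only briefly. */
		spin_lock(&pqueue->parallel.lock);
		list_replace_init(&pqueue->parallel.list, &local_list);
		spin_unlock(&pqueue->parallel.lock);

		while (!list_empty(&local_list)) {
			struct padata_priv *padata;

			padata = list_entry(local_list.next,
					    struct padata_priv, list);
			list_del_init(&padata->list);

			padata->parallel(padata);
		}
		local_bh_enable();
	}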
111 struct parallel_data *pd; in padata_do_parallel() local
115 pd = rcu_dereference_bh(pinst->pd); in padata_do_parallel()
121 if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu)) in padata_do_parallel()
128 if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM) in padata_do_parallel()
132 atomic_inc(&pd->refcnt); in padata_do_parallel()
133 padata->pd = pd; in padata_do_parallel()
136 target_cpu = padata_cpu_hash(pd); in padata_do_parallel()
137 queue = per_cpu_ptr(pd->pqueue, target_cpu); in padata_do_parallel()
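The submission path ties the matched lines together: the current pd is picked up under rcu_read_lock_bh(), the callback CPU is validated against the cbcpu mask, the backlog is bounded by MAX_OBJ_NUM, and the object is queued on the CPU chosen by padata_cpu_hash(). A sketch; the instance flag checks are elided and the error labels are filled in from context:

	int padata_do_parallel(struct padata_instance *pinst,
			       struct padata_priv *padata, int cb_cpu)
	{
		int target_cpu, err;
		struct padata_parallel_queue *queue;
		struct parallel_data *pd;

		rcu_read_lock_bh();

		pd = rcu_dereference_bh(pinst->pd);

		err = -EINVAL;
		if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
			goto out;

		/* Bound the number of in-flight objects per parallel_data. */
		err = -EBUSY;
		if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
			goto out;

		err = 0;
		atomic_inc(&pd->refcnt);
		padata->pd = pd;
		padata->cb_cpu = cb_cpu;

		target_cpu = padata_cpu_hash(pd);
		queue = per_cpu_ptr(pd->pqueue, target_cpu);

		spin_lock(&queue->parallel.lock);
		list_add_tail(&padata->list, &queue->parallel.list);
		spin_unlock(&queue->parallel.lock);

		queue_work_on(target_cpu, pinst->wq, &queue->work);

	out:
		rcu_read_unlock_bh();

		return err;
	}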
169 static struct padata_priv *padata_get_next(struct parallel_data *pd) in padata_get_next() argument
177 num_cpus = cpumask_weight(pd->cpumask.pcpu); in padata_get_next()
183 next_nr = pd->processed; in padata_get_next()
185 cpu = padata_index_to_cpu(pd, next_index); in padata_get_next()
186 next_queue = per_cpu_ptr(pd->pqueue, cpu); in padata_get_next()
198 atomic_dec(&pd->reorder_objects); in padata_get_next()
201 pd->processed++; in padata_get_next()
206 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { in padata_get_next()
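padata_get_next() computes, from pd->processed, which per-cpu reorder queue must hold the next object in sequence, and distinguishes "not yet arrived" (-EINPROGRESS) from "waiting on this very CPU's own parallel queue" (-ENODATA). A reconstruction; the locking and error-pointer returns follow the usual padata.c shape of this era:

	static struct padata_priv *padata_get_next(struct parallel_data *pd)
	{
		int cpu, num_cpus;
		unsigned int next_nr, next_index;
		struct padata_parallel_queue *next_queue;
		struct padata_priv *padata;
		struct padata_list *reorder;

		num_cpus = cpumask_weight(pd->cpumask.pcpu);

		/* Which per-cpu reorder queue holds the next object in order? */
		next_nr = pd->processed;
		next_index = next_nr % num_cpus;
		cpu = padata_index_to_cpu(pd, next_index);
		next_queue = per_cpu_ptr(pd->pqueue, cpu);

		padata = NULL;
		reorder = &next_queue->reorder;

		spin_lock(&reorder->lock);
		if (!list_empty(&reorder->list)) {
			padata = list_entry(reorder->list.next,
					    struct padata_priv, list);

			list_del_init(&padata->list);
			atomic_dec(&pd->reorder_objects);

			pd->processed++;

			spin_unlock(&reorder->lock);
			goto out;
		}
		spin_unlock(&reorder->lock);

		/* The next object is still being parallel-processed right here. */
		if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
			padata = ERR_PTR(-ENODATA);
			goto out;
		}

		padata = ERR_PTR(-EINPROGRESS);
	out:
		return padata;
	}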
216 static void padata_reorder(struct parallel_data *pd) in padata_reorder() argument
221 struct padata_instance *pinst = pd->pinst; in padata_reorder()
233 if (!spin_trylock_bh(&pd->lock)) in padata_reorder()
237 padata = padata_get_next(pd); in padata_reorder()
254 del_timer(&pd->timer); in padata_reorder()
255 spin_unlock_bh(&pd->lock); in padata_reorder()
260 squeue = per_cpu_ptr(pd->squeue, cb_cpu); in padata_reorder()
269 spin_unlock_bh(&pd->lock); in padata_reorder()
276 if (atomic_read(&pd->reorder_objects) in padata_reorder()
278 mod_timer(&pd->timer, jiffies + HZ); in padata_reorder()
280 del_timer(&pd->timer); in padata_reorder()
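padata_reorder() drains padata_get_next() in sequence order under pd->lock, taken with spin_trylock_bh() so only one CPU reorders at a time, hands each object to its callback CPU's serial queue, and re-arms pd->timer as a fallback if objects remain. A sketch; the PADATA_RESET condition on line 277 is reconstructed from context:

	static void padata_reorder(struct parallel_data *pd)
	{
		int cb_cpu;
		struct padata_priv *padata;
		struct padata_serial_queue *squeue;
		struct padata_instance *pinst = pd->pinst;

		/* Only one CPU may reorder at a time; others just back off. */
		if (!spin_trylock_bh(&pd->lock))
			return;

		while (1) {
			padata = padata_get_next(pd);

			/* Next object is still in flight on another CPU. */
			if (PTR_ERR(padata) == -EINPROGRESS)
				break;

			/* Next object waits on this CPU's own parallel queue. */
			if (PTR_ERR(padata) == -ENODATA) {
				del_timer(&pd->timer);
				spin_unlock_bh(&pd->lock);
				return;
			}

			cb_cpu = padata->cb_cpu;
			squeue = per_cpu_ptr(pd->squeue, cb_cpu);

			spin_lock(&squeue->serial.lock);
			list_add_tail(&padata->list, &squeue->serial.list);
			spin_unlock(&squeue->serial.lock);

			queue_work_on(cb_cpu, pinst->wq, &squeue->work);
		}

		spin_unlock_bh(&pd->lock);

		/* If objects remain, let the timer retry in case no one else does. */
		if (atomic_read(&pd->reorder_objects)
				&& !(pinst->flags & PADATA_RESET))
			mod_timer(&pd->timer, jiffies + HZ);
		else
			del_timer(&pd->timer);
	}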
287 struct parallel_data *pd = (struct parallel_data *)arg; in padata_reorder_timer() local
289 padata_reorder(pd); in padata_reorder_timer()
295 struct parallel_data *pd; in padata_serial_worker() local
300 pd = squeue->pd; in padata_serial_worker()
315 atomic_dec(&pd->refcnt); in padata_serial_worker()
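padata_serial_worker() mirrors the parallel worker on the callback CPU: it splices the serial list, runs each object's serial() completion in order, and drops the reference taken in padata_do_parallel() (line 315). Sketch:

	static void padata_serial_worker(struct work_struct *serial_work)
	{
		struct padata_serial_queue *squeue;
		struct parallel_data *pd;
		LIST_HEAD(local_list);

		local_bh_disable();
		squeue = container_of(serial_work,
				      struct padata_serial_queue, work);
		pd = squeue->pd;

		spin_lock(&squeue->serial.lock);
		list_replace_init(&squeue->serial.list, &local_list);
		spin_unlock(&squeue->serial.lock);

		while (!list_empty(&local_list)) {
			struct padata_priv *padata;

			padata = list_entry(local_list.next,
					    struct padata_priv, list);
			list_del_init(&padata->list);

			padata->serial(padata);
			/* Balances the atomic_inc in padata_do_parallel(). */
			atomic_dec(&pd->refcnt);
		}
		local_bh_enable();
	}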
332 struct parallel_data *pd; in padata_do_serial() local
334 pd = padata->pd; in padata_do_serial()
337 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_do_serial()
340 atomic_inc(&pd->reorder_objects); in padata_do_serial()
346 padata_reorder(pd); in padata_do_serial()
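padata_do_serial() is what the parallel() callback invokes when an object is done: it puts the object on the current CPU's reorder queue, bumps pd->reorder_objects, and kicks the reorder machinery. A sketch around the matched lines 332-346:

	void padata_do_serial(struct padata_priv *padata)
	{
		int cpu;
		struct padata_parallel_queue *pqueue;
		struct parallel_data *pd;

		pd = padata->pd;

		cpu = get_cpu();
		pqueue = per_cpu_ptr(pd->pqueue, cpu);

		spin_lock(&pqueue->reorder.lock);
		atomic_inc(&pd->reorder_objects);
		list_add_tail(&padata->list, &pqueue->reorder.list);
		spin_unlock(&pqueue->reorder.lock);

		put_cpu();

		padata_reorder(pd);
	}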
350 static int padata_setup_cpumasks(struct parallel_data *pd, in padata_setup_cpumasks() argument
354 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) in padata_setup_cpumasks()
357 cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask); in padata_setup_cpumasks()
358 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) { in padata_setup_cpumasks()
359 free_cpumask_var(pd->cpumask.pcpu); in padata_setup_cpumasks()
363 cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask); in padata_setup_cpumasks()
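In the cbcpu failure path it is the already-allocated pcpu mask that must be freed (line 359); freeing cbcpu there would both leak pcpu and touch a mask that was never allocated. The whole helper, reconstructed from the matched lines:

	static int padata_setup_cpumasks(struct parallel_data *pd,
					 const struct cpumask *pcpumask,
					 const struct cpumask *cbcpumask)
	{
		if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
			return -ENOMEM;

		cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
		if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
			/* cbcpu failed to allocate: free the pcpu mask, not cbcpu. */
			free_cpumask_var(pd->cpumask.pcpu);
			return -ENOMEM;
		}

		cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
		return 0;
	}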
374 static void padata_init_squeues(struct parallel_data *pd) in padata_init_squeues() argument
379 for_each_cpu(cpu, pd->cpumask.cbcpu) { in padata_init_squeues()
380 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_init_squeues()
381 squeue->pd = pd; in padata_init_squeues()
388 static void padata_init_pqueues(struct parallel_data *pd) in padata_init_pqueues() argument
394 for_each_cpu(cpu, pd->cpumask.pcpu) { in padata_init_pqueues()
395 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_init_pqueues()
396 pqueue->pd = pd; in padata_init_pqueues()
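The two init helpers wire each per-cpu queue back to its parallel_data and register the worker functions. A sketch; __padata_list_init() and the cpu_index bookkeeping (the value padata_get_next() compares at line 206) are assumed from the stock layout:

	static void padata_init_squeues(struct parallel_data *pd)
	{
		int cpu;
		struct padata_serial_queue *squeue;

		for_each_cpu(cpu, pd->cpumask.cbcpu) {
			squeue = per_cpu_ptr(pd->squeue, cpu);
			squeue->pd = pd;
			__padata_list_init(&squeue->serial);
			INIT_WORK(&squeue->work, padata_serial_worker);
		}
	}

	static void padata_init_pqueues(struct parallel_data *pd)
	{
		int cpu_index, cpu;
		struct padata_parallel_queue *pqueue;

		cpu_index = 0;
		for_each_cpu(cpu, pd->cpumask.pcpu) {
			pqueue = per_cpu_ptr(pd->pqueue, cpu);
			pqueue->pd = pd;
			pqueue->cpu_index = cpu_index;
			cpu_index++;

			__padata_list_init(&pqueue->reorder);
			__padata_list_init(&pqueue->parallel);
			INIT_WORK(&pqueue->work, padata_parallel_worker);
		}
	}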
412 struct parallel_data *pd; in padata_alloc_pd() local
414 pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); in padata_alloc_pd()
415 if (!pd) in padata_alloc_pd()
418 pd->pqueue = alloc_percpu(struct padata_parallel_queue); in padata_alloc_pd()
419 if (!pd->pqueue) in padata_alloc_pd()
422 pd->squeue = alloc_percpu(struct padata_serial_queue); in padata_alloc_pd()
423 if (!pd->squeue) in padata_alloc_pd()
425 if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0) in padata_alloc_pd()
428 padata_init_pqueues(pd); in padata_alloc_pd()
429 padata_init_squeues(pd); in padata_alloc_pd()
430 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd); in padata_alloc_pd()
431 atomic_set(&pd->seq_nr, -1); in padata_alloc_pd()
432 atomic_set(&pd->reorder_objects, 0); in padata_alloc_pd()
433 atomic_set(&pd->refcnt, 0); in padata_alloc_pd()
434 pd->pinst = pinst; in padata_alloc_pd()
435 spin_lock_init(&pd->lock); in padata_alloc_pd()
437 return pd; in padata_alloc_pd()
440 free_percpu(pd->squeue); in padata_alloc_pd()
442 free_percpu(pd->pqueue); in padata_alloc_pd()
444 kfree(pd); in padata_alloc_pd()
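padata_alloc_pd() builds the whole structure with the usual goto unwind. The error labels themselves do not contain "pd" and therefore do not appear in this listing, so their names below are assumptions:

	static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
						     const struct cpumask *pcpumask,
						     const struct cpumask *cbcpumask)
	{
		struct parallel_data *pd;

		pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
		if (!pd)
			goto err;

		pd->pqueue = alloc_percpu(struct padata_parallel_queue);
		if (!pd->pqueue)
			goto err_free_pd;

		pd->squeue = alloc_percpu(struct padata_serial_queue);
		if (!pd->squeue)
			goto err_free_pqueue;
		if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
			goto err_free_squeue;

		padata_init_pqueues(pd);
		padata_init_squeues(pd);
		setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
		atomic_set(&pd->seq_nr, -1);
		atomic_set(&pd->reorder_objects, 0);
		atomic_set(&pd->refcnt, 0);
		pd->pinst = pinst;
		spin_lock_init(&pd->lock);

		return pd;

	err_free_squeue:
		free_percpu(pd->squeue);
	err_free_pqueue:
		free_percpu(pd->pqueue);
	err_free_pd:
		kfree(pd);
	err:
		return NULL;
	}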
449 static void padata_free_pd(struct parallel_data *pd) in padata_free_pd() argument
451 free_cpumask_var(pd->cpumask.pcpu); in padata_free_pd()
452 free_cpumask_var(pd->cpumask.cbcpu); in padata_free_pd()
453 free_percpu(pd->pqueue); in padata_free_pd()
454 free_percpu(pd->squeue); in padata_free_pd()
455 kfree(pd); in padata_free_pd()
459 static void padata_flush_queues(struct parallel_data *pd) in padata_flush_queues() argument
465 for_each_cpu(cpu, pd->cpumask.pcpu) { in padata_flush_queues()
466 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_flush_queues()
470 del_timer_sync(&pd->timer); in padata_flush_queues()
472 if (atomic_read(&pd->reorder_objects)) in padata_flush_queues()
473 padata_reorder(pd); in padata_flush_queues()
475 for_each_cpu(cpu, pd->cpumask.cbcpu) { in padata_flush_queues()
476 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_flush_queues()
480 BUG_ON(atomic_read(&pd->refcnt) != 0); in padata_flush_queues()
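padata_flush_queues() quiesces a parallel_data before it is freed: flush every parallel worker, stop the timer, reorder whatever is left, flush the serial workers, and assert that no references remain (line 480). Sketch; the flush_work() calls are assumed from context:

	static void padata_flush_queues(struct parallel_data *pd)
	{
		int cpu;
		struct padata_parallel_queue *pqueue;
		struct padata_serial_queue *squeue;

		for_each_cpu(cpu, pd->cpumask.pcpu) {
			pqueue = per_cpu_ptr(pd->pqueue, cpu);
			flush_work(&pqueue->work);
		}

		del_timer_sync(&pd->timer);

		if (atomic_read(&pd->reorder_objects))
			padata_reorder(pd);

		for_each_cpu(cpu, pd->cpumask.cbcpu) {
			squeue = per_cpu_ptr(pd->squeue, cpu);
			flush_work(&squeue->work);
		}

		/* Every object queued via padata_do_parallel() must be done. */
		BUG_ON(atomic_read(&pd->refcnt) != 0);
	}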
498 padata_flush_queues(pinst->pd); in __padata_stop()
506 struct parallel_data *pd_old = pinst->pd; in padata_replace()
511 rcu_assign_pointer(pinst->pd, pd_new); in padata_replace()
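padata_replace() is the RCU swap that every cpumask update funnels through: publish the new pd with rcu_assign_pointer(), wait a grace period so no submitter still sees the old one, then flush and free the old one. A condensed sketch (cpumask-change notifier handling is elided):

	static void padata_replace(struct padata_instance *pinst,
				   struct parallel_data *pd_new)
	{
		struct parallel_data *pd_old = pinst->pd;

		pinst->flags |= PADATA_RESET;

		rcu_assign_pointer(pinst->pd, pd_new);

		/* Wait until no padata_do_parallel() can still hold pd_old. */
		synchronize_rcu();

		padata_flush_queues(pd_old);
		padata_free_pd(pd_old);

		pinst->flags &= ~PADATA_RESET;
	}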
581 struct parallel_data *pd; in __padata_set_cpumasks() local
594 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); in __padata_set_cpumasks()
595 if (!pd) in __padata_set_cpumasks()
601 padata_replace(pinst, pd); in __padata_set_cpumasks()
679 struct parallel_data *pd; in __padata_add_cpu() local
682 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, in __padata_add_cpu()
684 if (!pd) in __padata_add_cpu()
687 padata_replace(pinst, pd); in __padata_add_cpu()
735 struct parallel_data *pd = NULL; in __padata_remove_cpu() local
743 pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu, in __padata_remove_cpu()
745 if (!pd) in __padata_remove_cpu()
748 padata_replace(pinst, pd); in __padata_remove_cpu()
750 cpumask_clear_cpu(cpu, pd->cpumask.cbcpu); in __padata_remove_cpu()
751 cpumask_clear_cpu(cpu, pd->cpumask.pcpu); in __padata_remove_cpu()
884 padata_free_pd(pinst->pd); in __padata_free()
1041 struct parallel_data *pd = NULL; in padata_alloc() local
1058 pd = padata_alloc_pd(pinst, pcpumask, cbcpumask); in padata_alloc()
1059 if (!pd) in padata_alloc()
1062 rcu_assign_pointer(pinst->pd, pd); in padata_alloc()
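At instance creation the same pattern appears without a predecessor: the first pd is built and published with rcu_assign_pointer() before the instance is returned. A condensed sketch of the relevant part of padata_alloc(); the cpumask validation, mutex handling, and kobject setup are elided, and the signature (workqueue passed in by the caller) is assumed for kernels of this era:

	struct padata_instance *padata_alloc(struct workqueue_struct *wq,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
	{
		struct padata_instance *pinst;
		struct parallel_data *pd = NULL;

		pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
		if (!pinst)
			return NULL;

		/* ... cpumask allocation and validation elided ... */

		pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
		if (!pd)
			goto err_free_inst;

		rcu_assign_pointer(pinst->pd, pd);

		pinst->wq = wq;
		/* ... cpumask copies, hotplug notifier, kobject init elided ... */

		return pinst;

	err_free_inst:
		kfree(pinst);
		return NULL;
	}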