Lines Matching refs:work in kernel/workqueue.c

425 struct work_struct *work = addr; in work_fixup_init() local
429 cancel_work_sync(work); in work_fixup_init()
430 debug_object_init(work, &work_debug_descr); in work_fixup_init()
444 struct work_struct *work = addr; in work_fixup_activate() local
454 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) { in work_fixup_activate()
455 debug_object_init(work, &work_debug_descr); in work_fixup_activate()
456 debug_object_activate(work, &work_debug_descr); in work_fixup_activate()
476 struct work_struct *work = addr; in work_fixup_free() local
480 cancel_work_sync(work); in work_fixup_free()
481 debug_object_free(work, &work_debug_descr); in work_fixup_free()
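
The three fixup callbacks above (work_fixup_init/activate/free) are the hooks workqueue registers with lib/debugobjects. A minimal sketch of the wiring, following the debug_obj_descr layout of this kernel era (the real descriptor in kernel/workqueue.c also carries a .debug_hint callback, omitted here):

	static struct debug_obj_descr work_debug_descr = {
		.name		= "work_struct",
		.fixup_init	= work_fixup_init,	/* untracked object re-initialized */
		.fixup_activate	= work_fixup_activate,	/* e.g. static object seen for the first time */
		.fixup_free	= work_fixup_free,	/* object freed while still active */
	};
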
496 static inline void debug_work_activate(struct work_struct *work) in debug_work_activate() argument
498 debug_object_activate(work, &work_debug_descr); in debug_work_activate()
501 static inline void debug_work_deactivate(struct work_struct *work) in debug_work_deactivate() argument
503 debug_object_deactivate(work, &work_debug_descr); in debug_work_deactivate()
506 void __init_work(struct work_struct *work, int onstack) in __init_work() argument
509 debug_object_init_on_stack(work, &work_debug_descr); in __init_work()
511 debug_object_init(work, &work_debug_descr); in __init_work()
515 void destroy_work_on_stack(struct work_struct *work) in destroy_work_on_stack() argument
517 debug_object_free(work, &work_debug_descr); in destroy_work_on_stack()
521 void destroy_delayed_work_on_stack(struct delayed_work *work) in destroy_delayed_work_on_stack() argument
523 destroy_timer_on_stack(&work->timer); in destroy_delayed_work_on_stack()
524 debug_object_free(&work->work, &work_debug_descr); in destroy_delayed_work_on_stack()
529 static inline void debug_work_activate(struct work_struct *work) { } in debug_work_activate() argument
530 static inline void debug_work_deactivate(struct work_struct *work) { } in debug_work_deactivate() argument
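
__init_work() and destroy_work_on_stack() above exist so that work items living on the stack are tracked as on-stack objects. The usual calling pattern, with a hypothetical work function:

	static void my_sync_fn(struct work_struct *work) { /* ... */ }

	void run_and_wait(void)
	{
		struct work_struct w;

		INIT_WORK_ONSTACK(&w, my_sync_fn);	/* ends up in __init_work(&w, 1) */
		schedule_work(&w);
		flush_work(&w);				/* must finish before w goes out of scope */
		destroy_work_on_stack(&w);		/* balances the debugobjects tracking */
	}
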
589 static int get_work_color(struct work_struct *work) in get_work_color() argument
591 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & in get_work_color()
620 static inline void set_work_data(struct work_struct *work, unsigned long data, in set_work_data() argument
623 WARN_ON_ONCE(!work_pending(work)); in set_work_data()
624 atomic_long_set(&work->data, data | flags | work_static(work)); in set_work_data()
627 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
630 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
634 static void set_work_pool_and_keep_pending(struct work_struct *work, in set_work_pool_and_keep_pending() argument
637 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, in set_work_pool_and_keep_pending()
641 static void set_work_pool_and_clear_pending(struct work_struct *work, in set_work_pool_and_clear_pending() argument
651 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); in set_work_pool_and_clear_pending()
683 static void clear_work_data(struct work_struct *work) in clear_work_data() argument
686 set_work_data(work, WORK_STRUCT_NO_POOL, 0); in clear_work_data()
689 static struct pool_workqueue *get_work_pwq(struct work_struct *work) in get_work_pwq() argument
691 unsigned long data = atomic_long_read(&work->data); in get_work_pwq()
714 static struct worker_pool *get_work_pool(struct work_struct *work) in get_work_pool() argument
716 unsigned long data = atomic_long_read(&work->data); in get_work_pool()
739 static int get_work_pool_id(struct work_struct *work) in get_work_pool_id() argument
741 unsigned long data = atomic_long_read(&work->data); in get_work_pool_id()
750 static void mark_work_canceling(struct work_struct *work) in mark_work_canceling() argument
752 unsigned long pool_id = get_work_pool_id(work); in mark_work_canceling()
755 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); in mark_work_canceling()
758 static bool work_is_canceling(struct work_struct *work) in work_is_canceling() argument
760 unsigned long data = atomic_long_read(&work->data); in work_is_canceling()
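
All of the accessors above treat work->data as one tagged word: while the item is on a queue it holds a pool_workqueue pointer plus flag bits, and once off queue it holds the last pool id shifted up by WORK_OFFQ_POOL_SHIFT. A minimal decode sketch using the real constants from include/linux/workqueue.h, mirroring what get_work_pwq() and get_work_pool_id() do:

	static void decode_work_data(struct work_struct *work)
	{
		unsigned long data = atomic_long_read(&work->data);

		if (data & WORK_STRUCT_PWQ) {
			/* queued: low bits are flags, the rest is the pwq pointer */
			struct pool_workqueue *pwq =
				(void *)(data & WORK_STRUCT_WQ_DATA_MASK);
			(void)pwq;
		} else {
			/* off queue: upper bits carry the last pool id */
			int pool_id = data >> WORK_OFFQ_POOL_SHIFT;
			(void)pool_id;
		}
	}
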
1008 struct work_struct *work) in find_worker_executing_work() argument
1013 (unsigned long)work) in find_worker_executing_work()
1014 if (worker->current_work == work && in find_worker_executing_work()
1015 worker->current_func == work->func) in find_worker_executing_work()
1038 static void move_linked_works(struct work_struct *work, struct list_head *head, in move_linked_works() argument
1047 list_for_each_entry_safe_from(work, n, NULL, entry) { in move_linked_works()
1048 list_move_tail(&work->entry, head); in move_linked_works()
1049 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) in move_linked_works()
1120 static void pwq_activate_delayed_work(struct work_struct *work) in pwq_activate_delayed_work() argument
1122 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_delayed_work()
1124 trace_workqueue_activate_work(work); in pwq_activate_delayed_work()
1125 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_delayed_work()
1126 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); in pwq_activate_delayed_work()
1132 struct work_struct *work = list_first_entry(&pwq->delayed_works, in pwq_activate_first_delayed() local
1135 pwq_activate_delayed_work(work); in pwq_activate_first_delayed()
1212 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, in try_to_grab_pending() argument
1222 struct delayed_work *dwork = to_delayed_work(work); in try_to_grab_pending()
1234 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) in try_to_grab_pending()
1241 pool = get_work_pool(work); in try_to_grab_pending()
1254 pwq = get_work_pwq(work); in try_to_grab_pending()
1256 debug_work_deactivate(work); in try_to_grab_pending()
1265 if (*work_data_bits(work) & WORK_STRUCT_DELAYED) in try_to_grab_pending()
1266 pwq_activate_delayed_work(work); in try_to_grab_pending()
1268 list_del_init(&work->entry); in try_to_grab_pending()
1269 pwq_dec_nr_in_flight(pwq, get_work_color(work)); in try_to_grab_pending()
1272 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1280 if (work_is_canceling(work)) in try_to_grab_pending()
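
try_to_grab_pending() returns 1 when it steals a pending item, 0 when the item was idle, and -EAGAIN (or -ENOENT while a cancel is in flight) when the caller must retry; on success it returns with irqs disabled and the previous state saved in *flags. This is essentially how mod_delayed_work_on() at 1565 drives it:

	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (likely(ret >= 0)) {
		__queue_delayed_work(cpu, wq, dwork, delay);
		local_irq_restore(flags);	/* pairs with the save inside the grab */
	}
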
1299 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1305 set_work_pwq(work, pwq, extra_flags); in insert_work()
1306 list_add_tail(&work->entry, head); in insert_work()
1337 struct work_struct *work) in __queue_work() argument
1353 debug_work_activate(work); in __queue_work()
1374 last_pool = get_work_pool(work); in __queue_work()
1380 worker = find_worker_executing_work(last_pool, work); in __queue_work()
1413 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1415 if (WARN_ON(!list_empty(&work->entry))) { in __queue_work()
1424 trace_workqueue_activate_work(work); in __queue_work()
1432 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1449 struct work_struct *work) in queue_work_on() argument
1456 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_work_on()
1457 __queue_work(cpu, wq, work); in queue_work_on()
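
The test_and_set_bit() on WORK_STRUCT_PENDING_BIT at 1456 is what guarantees a work item sits on at most one queue at a time; queue_work_on() returns false when the bit was already set. Caller sketch with a hypothetical handler:

	static void my_handler(struct work_struct *work) { /* ... */ }
	static DECLARE_WORK(my_work, my_handler);

	/* queue_work(wq, w) is shorthand for queue_work_on(WORK_CPU_UNBOUND, wq, w) */
	if (!queue_work(system_wq, &my_work))
		pr_debug("my_work was already pending\n");
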
1471 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in delayed_work_timer_fn()
1479 struct work_struct *work = &dwork->work; in __queue_delayed_work() local
1484 WARN_ON_ONCE(!list_empty(&work->entry)); in __queue_delayed_work()
1493 __queue_work(cpu, wq, &dwork->work); in __queue_delayed_work()
1523 struct work_struct *work = &dwork->work; in queue_delayed_work_on() local
1530 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_delayed_work_on()
1565 ret = try_to_grab_pending(&dwork->work, true, &flags); in mod_delayed_work_on()
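
The delayed path above arms dwork->timer with delayed_work_timer_fn(), which re-enters __queue_work() on expiry (1471). Typical usage, including the re-arm idiom that mod_delayed_work_on() implements:

	static void poll_fn(struct work_struct *work) { /* ... */ }
	static DECLARE_DELAYED_WORK(poll_work, poll_fn);

	queue_delayed_work(system_wq, &poll_work, HZ);	  /* no-op if already pending */
	mod_delayed_work(system_wq, &poll_work, 2 * HZ);  /* (re)sets the timer either way */
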
1832 static void send_mayday(struct work_struct *work) in send_mayday() argument
1834 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday()
1858 struct work_struct *work; in pool_mayday_timeout() local
1870 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
1871 send_mayday(work); in pool_mayday_timeout()
1990 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
1994 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work()
2009 lockdep_copy_map(&lockdep_map, &work->lockdep_map); in process_one_work()
2021 collision = find_worker_executing_work(pool, work); in process_one_work()
2023 move_linked_works(work, &collision->scheduled, NULL); in process_one_work()
2028 debug_work_deactivate(work); in process_one_work()
2029 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2030 worker->current_work = work; in process_one_work()
2031 worker->current_func = work->func; in process_one_work()
2033 work_color = get_work_color(work); in process_one_work()
2035 list_del_init(&work->entry); in process_one_work()
2062 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2068 trace_workqueue_execute_start(work); in process_one_work()
2069 worker->current_func(work); in process_one_work()
2074 trace_workqueue_execute_end(work); in process_one_work()
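
Note that set_work_pool_and_clear_pending() at 2062 runs before current_func is invoked at 2069, so a work function may legally requeue itself. Shape of such a handler, with a hypothetical container struct:

	struct my_ctx {
		struct work_struct work;
		bool again;
	};

	static void my_work_fn(struct work_struct *work)
	{
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);

		/* PENDING is already clear here, so this queues a fresh round */
		if (ctx->again)
			schedule_work(&ctx->work);
	}
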
2127 struct work_struct *work = list_first_entry(&worker->scheduled, in process_scheduled_works() local
2129 process_one_work(worker, work); in process_scheduled_works()
2195 struct work_struct *work = in worker_thread() local
2199 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { in worker_thread()
2201 process_one_work(worker, work); in worker_thread()
2205 move_linked_works(work, &worker->scheduled, NULL); in worker_thread()
2281 struct work_struct *work, *n; in rescuer_thread() local
2298 list_for_each_entry_safe(work, n, &pool->worklist, entry) in rescuer_thread()
2299 if (get_work_pwq(work) == pwq) in rescuer_thread()
2300 move_linked_works(work, scheduled, &n); in rescuer_thread()
2359 struct work_struct work; member
2364 static void wq_barrier_func(struct work_struct *work) in wq_barrier_func() argument
2366 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); in wq_barrier_func()
2407 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); in insert_wq_barrier()
2408 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); in insert_wq_barrier()
2427 debug_work_activate(&barr->work); in insert_wq_barrier()
2428 insert_work(pwq, &barr->work, head, in insert_wq_barrier()
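
The barrier trick above is how flushing waits for a specific item: an on-stack work whose only job is to fire a completion once everything queued ahead of it has run. A sketch assuming wq_barrier carries nothing beyond the members visible at 2359 (some kernel versions add informational fields):

	struct wq_barrier {
		struct work_struct	work;
		struct completion	done;
	};

	static void wq_barrier_func(struct work_struct *work)
	{
		struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

		complete(&barr->done);	/* wakes the flusher blocked in flush_work() */
	}
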
2711 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) in start_flush_work() argument
2720 pool = get_work_pool(work); in start_flush_work()
2728 pwq = get_work_pwq(work); in start_flush_work()
2733 worker = find_worker_executing_work(pool, work); in start_flush_work()
2739 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
2771 bool flush_work(struct work_struct *work) in flush_work() argument
2775 lock_map_acquire(&work->lockdep_map); in flush_work()
2776 lock_map_release(&work->lockdep_map); in flush_work()
2778 if (start_flush_work(work, &barr)) { in flush_work()
2780 destroy_work_on_stack(&barr.work); in flush_work()
2790 struct work_struct *work; member
2797 if (cwait->work != key) in cwt_wakefn()
2802 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) in __cancel_work_timer() argument
2809 ret = try_to_grab_pending(work, is_dwork, &flags); in __cancel_work_timer()
2831 cwait.work = work; in __cancel_work_timer()
2835 if (work_is_canceling(work)) in __cancel_work_timer()
2842 mark_work_canceling(work); in __cancel_work_timer()
2845 flush_work(work); in __cancel_work_timer()
2846 clear_work_data(work); in __cancel_work_timer()
2855 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); in __cancel_work_timer()
2878 bool cancel_work_sync(struct work_struct *work) in cancel_work_sync() argument
2880 return __cancel_work_timer(work, false); in cancel_work_sync()
2900 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in flush_delayed_work()
2902 return flush_work(&dwork->work); in flush_delayed_work()
2928 ret = try_to_grab_pending(&dwork->work, true, &flags); in cancel_delayed_work()
2934 set_work_pool_and_clear_pending(&dwork->work, in cancel_delayed_work()
2935 get_work_pool_id(&dwork->work)); in cancel_delayed_work()
2952 return __cancel_work_timer(&dwork->work, true); in cancel_delayed_work_sync()
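
cancel_work_sync() and cancel_delayed_work_sync() both steal a pending instance via try_to_grab_pending() and then flush_work() any instance already running, so the callback is guaranteed not to be executing on return; plain cancel_delayed_work() at 2928 only does the stealing. Typical driver teardown order, with a hypothetical ctx:

	/* stop new submissions first (irqs, timers, sysfs, ...) */
	cancel_delayed_work_sync(&ctx->poll_work);
	cancel_work_sync(&ctx->event_work);	/* handler not running afterwards */
	destroy_workqueue(ctx->wq);		/* safe now that the wq is drained */
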
2979 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu() local
2981 INIT_WORK(work, func); in schedule_on_each_cpu()
2982 schedule_work_on(cpu, work); in schedule_on_each_cpu()
3008 fn(&ew->work); in execute_in_process_context()
3012 INIT_WORK(&ew->work, fn); in execute_in_process_context()
3013 schedule_work(&ew->work); in execute_in_process_context()
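
execute_in_process_context() at 3008/3012 runs fn inline when the caller is already in process context and only defers through ew->work otherwise. Usage sketch with a hypothetical release function:

	static void release_fn(struct work_struct *work) { /* may sleep */ }
	static struct execute_work release_ew;

	/* returns 0 if fn ran inline, 1 if it was deferred via schedule_work() */
	execute_in_process_context(release_fn, &release_ew);
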
3306 static void pwq_unbound_release_workfn(struct work_struct *work) in pwq_unbound_release_workfn() argument
3308 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn()
4107 unsigned int work_busy(struct work_struct *work) in work_busy() argument
4113 if (work_pending(work)) in work_busy()
4117 pool = get_work_pool(work); in work_busy()
4120 if (find_worker_executing_work(pool, work)) in work_busy()
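
work_busy() returns an OR of WORK_BUSY_PENDING and WORK_BUSY_RUNNING. It is advisory only: the answer can go stale as soon as the pool lock is dropped, so it suits diagnostics rather than synchronization. For example (ctx is hypothetical):

	unsigned int busy = work_busy(&ctx->work);

	if (busy & WORK_BUSY_RUNNING)
		pr_info("handler currently executing\n");
	if (busy & WORK_BUSY_PENDING)
		pr_info("handler queued but not yet started\n");
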
4215 static void pr_cont_work(bool comma, struct work_struct *work) in pr_cont_work() argument
4217 if (work->func == wq_barrier_func) { in pr_cont_work()
4220 barr = container_of(work, struct wq_barrier, work); in pr_cont_work()
4225 pr_cont("%s %pf", comma ? "," : "", work->func); in pr_cont_work()
4232 struct work_struct *work; in show_pwq() local
4261 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
4262 pr_cont_work(false, work); in show_pwq()
4268 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4269 if (get_work_pwq(work) == pwq) { in show_pwq()
4278 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4279 if (get_work_pwq(work) != pwq) in show_pwq()
4282 pr_cont_work(comma, work); in show_pwq()
4283 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
4292 list_for_each_entry(work, &pwq->delayed_works, entry) { in show_pwq()
4293 pr_cont_work(comma, work); in show_pwq()
4294 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
4382 static void wq_unbind_fn(struct work_struct *work) in wq_unbind_fn() argument
4626 struct work_struct work; member
4632 static void work_for_cpu_fn(struct work_struct *work) in work_for_cpu_fn() argument
4634 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); in work_for_cpu_fn()
4654 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); in work_on_cpu()
4655 schedule_work_on(cpu, &wfc.work); in work_on_cpu()
4656 flush_work(&wfc.work); in work_on_cpu()
4657 destroy_work_on_stack(&wfc.work); in work_on_cpu()
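
work_on_cpu() packages the INIT_WORK_ONSTACK/schedule_work_on/flush_work sequence above into a synchronous cross-CPU call that returns fn's result. Caller sketch with a hypothetical function:

	static long read_local_state(void *arg)
	{
		/* runs in a kworker bound to the requested CPU; may sleep */
		return 0;
	}

	long ret = work_on_cpu(2, read_local_state, NULL);
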