Lines matching refs:work in kernel/workqueue.c
430 struct work_struct *work = addr; in work_fixup_init() local
434 cancel_work_sync(work); in work_fixup_init()
435 debug_object_init(work, &work_debug_descr); in work_fixup_init()
449 struct work_struct *work = addr; in work_fixup_activate() local
459 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) { in work_fixup_activate()
460 debug_object_init(work, &work_debug_descr); in work_fixup_activate()
461 debug_object_activate(work, &work_debug_descr); in work_fixup_activate()
481 struct work_struct *work = addr; in work_fixup_free() local
485 cancel_work_sync(work); in work_fixup_free()
486 debug_object_free(work, &work_debug_descr); in work_fixup_free()
501 static inline void debug_work_activate(struct work_struct *work) in debug_work_activate() argument
503 debug_object_activate(work, &work_debug_descr); in debug_work_activate()
506 static inline void debug_work_deactivate(struct work_struct *work) in debug_work_deactivate() argument
508 debug_object_deactivate(work, &work_debug_descr); in debug_work_deactivate()
511 void __init_work(struct work_struct *work, int onstack) in __init_work() argument
514 debug_object_init_on_stack(work, &work_debug_descr); in __init_work()
516 debug_object_init(work, &work_debug_descr); in __init_work()
520 void destroy_work_on_stack(struct work_struct *work) in destroy_work_on_stack() argument
522 debug_object_free(work, &work_debug_descr); in destroy_work_on_stack()
526 void destroy_delayed_work_on_stack(struct delayed_work *work) in destroy_delayed_work_on_stack() argument
528 destroy_timer_on_stack(&work->timer); in destroy_delayed_work_on_stack()
529 debug_object_free(&work->work, &work_debug_descr); in destroy_delayed_work_on_stack()
534 static inline void debug_work_activate(struct work_struct *work) { } in debug_work_activate() argument
535 static inline void debug_work_deactivate(struct work_struct *work) { } in debug_work_deactivate() argument
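The __init_work()/destroy_work_on_stack() pair above is what lets debugobjects track work items that live on the stack. A minimal sketch of the caller side using only the public API (the handler and function names are illustrative, not taken from this file):

#include <linux/printk.h>
#include <linux/workqueue.h>

static void my_stack_work_fn(struct work_struct *work)
{
	pr_info("on-stack work ran\n");
}

static void my_run_stack_work(void)
{
	struct work_struct w;

	/* INIT_WORK_ONSTACK() reaches __init_work(..., onstack = 1),
	 * which registers the object via debug_object_init_on_stack(). */
	INIT_WORK_ONSTACK(&w, my_stack_work_fn);
	schedule_work(&w);
	flush_work(&w);			/* the item must finish before the frame dies */
	destroy_work_on_stack(&w);	/* pairs with INIT_WORK_ONSTACK() */
}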
594 static int get_work_color(struct work_struct *work) in get_work_color() argument
596 return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) & in get_work_color()
625 static inline void set_work_data(struct work_struct *work, unsigned long data, in set_work_data() argument
628 WARN_ON_ONCE(!work_pending(work)); in set_work_data()
629 atomic_long_set(&work->data, data | flags | work_static(work)); in set_work_data()
632 static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq, in set_work_pwq() argument
635 set_work_data(work, (unsigned long)pwq, in set_work_pwq()
639 static void set_work_pool_and_keep_pending(struct work_struct *work, in set_work_pool_and_keep_pending() argument
642 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, in set_work_pool_and_keep_pending()
646 static void set_work_pool_and_clear_pending(struct work_struct *work, in set_work_pool_and_clear_pending() argument
656 set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0); in set_work_pool_and_clear_pending()
688 static void clear_work_data(struct work_struct *work) in clear_work_data() argument
691 set_work_data(work, WORK_STRUCT_NO_POOL, 0); in clear_work_data()
694 static struct pool_workqueue *get_work_pwq(struct work_struct *work) in get_work_pwq() argument
696 unsigned long data = atomic_long_read(&work->data); in get_work_pwq()
719 static struct worker_pool *get_work_pool(struct work_struct *work) in get_work_pool() argument
721 unsigned long data = atomic_long_read(&work->data); in get_work_pool()
744 static int get_work_pool_id(struct work_struct *work) in get_work_pool_id() argument
746 unsigned long data = atomic_long_read(&work->data); in get_work_pool_id()
755 static void mark_work_canceling(struct work_struct *work) in mark_work_canceling() argument
757 unsigned long pool_id = get_work_pool_id(work); in mark_work_canceling()
760 set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING); in mark_work_canceling()
763 static bool work_is_canceling(struct work_struct *work) in work_is_canceling() argument
765 unsigned long data = atomic_long_read(&work->data); in work_is_canceling()
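The set_work_data()/get_work_pwq()/get_work_pool() group above multiplexes the single atomic word work->data: while an item is queued or running it holds a pool_workqueue pointer plus WORK_STRUCT_* flag bits, and while it is off-queue it holds the last worker_pool's ID shifted by WORK_OFFQ_POOL_SHIFT plus off-queue flags such as WORK_OFFQ_CANCELING. A deliberately simplified model of that packing, with made-up flag values and a private structure so nothing here is mistaken for the real constants:

#include <linux/atomic.h>

#define MY_PENDING	1UL	/* stand-in for WORK_STRUCT_PENDING */
#define MY_PWQ		2UL	/* word currently holds a pwq pointer */
#define MY_POOL_SHIFT	5	/* stand-in for WORK_OFFQ_POOL_SHIFT */

struct my_work_data {
	atomic_long_t data;
};

static void my_set_pwq(struct my_work_data *w, void *pwq)
{
	/* While queued: pointer and low flag bits share one word, which
	 * is why the real pool_workqueue is allocated with enough
	 * alignment to keep its low bits free for flags. */
	atomic_long_set(&w->data, (unsigned long)pwq | MY_PWQ | MY_PENDING);
}

static void my_set_pool_id(struct my_work_data *w, unsigned long pool_id)
{
	/* While idle: only the last pool's ID survives, shifted above
	 * the flag bits, so a later flush or cancel can still find the
	 * pool that might be executing the item. */
	atomic_long_set(&w->data, pool_id << MY_POOL_SHIFT);
}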
1013 struct work_struct *work) in find_worker_executing_work() argument
1018 (unsigned long)work) in find_worker_executing_work()
1019 if (worker->current_work == work && in find_worker_executing_work()
1020 worker->current_func == work->func) in find_worker_executing_work()
1043 static void move_linked_works(struct work_struct *work, struct list_head *head, in move_linked_works() argument
1052 list_for_each_entry_safe_from(work, n, NULL, entry) { in move_linked_works()
1053 list_move_tail(&work->entry, head); in move_linked_works()
1054 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED)) in move_linked_works()
1125 static void pwq_activate_delayed_work(struct work_struct *work) in pwq_activate_delayed_work() argument
1127 struct pool_workqueue *pwq = get_work_pwq(work); in pwq_activate_delayed_work()
1129 trace_workqueue_activate_work(work); in pwq_activate_delayed_work()
1130 move_linked_works(work, &pwq->pool->worklist, NULL); in pwq_activate_delayed_work()
1131 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); in pwq_activate_delayed_work()
1137 struct work_struct *work = list_first_entry(&pwq->delayed_works, in pwq_activate_first_delayed() local
1140 pwq_activate_delayed_work(work); in pwq_activate_first_delayed()
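Despite the name, pwq_activate_delayed_work() has nothing to do with timers: pwq->delayed_works holds items queued while the pool_workqueue was already at max_active, and they are "activated" onto the pool's worklist as slots free up. A sketch of a setup that exercises this path (the workqueue name is illustrative); with max_active of 1, any second item queued while the first is still outstanding parks on that list:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int my_setup(void)
{
	/* flags = 0, max_active = 1: a per-CPU workqueue where a second
	 * queued item waits on pwq->delayed_works until
	 * pwq_activate_first_delayed() promotes it. */
	my_wq = alloc_workqueue("my_wq", 0, 1);
	return my_wq ? 0 : -ENOMEM;
}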
1217 static int try_to_grab_pending(struct work_struct *work, bool is_dwork, in try_to_grab_pending() argument
1227 struct delayed_work *dwork = to_delayed_work(work); in try_to_grab_pending()
1239 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) in try_to_grab_pending()
1246 pool = get_work_pool(work); in try_to_grab_pending()
1259 pwq = get_work_pwq(work); in try_to_grab_pending()
1261 debug_work_deactivate(work); in try_to_grab_pending()
1270 if (*work_data_bits(work) & WORK_STRUCT_DELAYED) in try_to_grab_pending()
1271 pwq_activate_delayed_work(work); in try_to_grab_pending()
1273 list_del_init(&work->entry); in try_to_grab_pending()
1274 pwq_dec_nr_in_flight(pwq, get_work_color(work)); in try_to_grab_pending()
1277 set_work_pool_and_keep_pending(work, pool->id); in try_to_grab_pending()
1285 if (work_is_canceling(work)) in try_to_grab_pending()
1304 static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, in insert_work() argument
1310 set_work_pwq(work, pwq, extra_flags); in insert_work()
1311 list_add_tail(&work->entry, head); in insert_work()
1342 struct work_struct *work) in __queue_work() argument
1358 debug_work_activate(work); in __queue_work()
1379 last_pool = get_work_pool(work); in __queue_work()
1385 worker = find_worker_executing_work(last_pool, work); in __queue_work()
1418 trace_workqueue_queue_work(req_cpu, pwq, work); in __queue_work()
1420 if (WARN_ON(!list_empty(&work->entry))) { in __queue_work()
1429 trace_workqueue_activate_work(work); in __queue_work()
1437 insert_work(pwq, work, worklist, work_flags); in __queue_work()
1454 struct work_struct *work) in queue_work_on() argument
1461 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_work_on()
1462 __queue_work(cpu, wq, work); in queue_work_on()
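queue_work_on() is the caller-visible half of the pattern above: it atomically sets WORK_STRUCT_PENDING_BIT and, only if the bit was clear, hands the item to __queue_work(). A small usage sketch (handler, work item and the choice of CPU 1 are all illustrative):

#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void my_handler(struct work_struct *work)
{
	pr_info("ran on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(my_work, my_handler);

static void my_kick(void)
{
	/* A work item has only one PENDING slot: a second call made
	 * before my_handler() starts executing returns false and
	 * queues nothing. */
	if (!queue_work_on(1, system_wq, &my_work))
		pr_debug("my_work was already pending\n");
}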
1476 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in delayed_work_timer_fn()
1484 struct work_struct *work = &dwork->work; in __queue_delayed_work() local
1489 WARN_ON_ONCE(!list_empty(&work->entry)); in __queue_delayed_work()
1498 __queue_work(cpu, wq, &dwork->work); in __queue_delayed_work()
1528 struct work_struct *work = &dwork->work; in queue_delayed_work_on() local
1535 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { in queue_delayed_work_on()
1570 ret = try_to_grab_pending(&dwork->work, true, &flags); in mod_delayed_work_on()
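The delayed-work entries above (delayed_work_timer_fn(), __queue_delayed_work(), mod_delayed_work_on()) put the same machinery behind a timer: the timer callback simply calls __queue_work() when the delay expires, and mod_delayed_work_on() uses try_to_grab_pending() so the timeout can be changed whether or not the item is already pending. A typical self-rearming user might look like this (all names illustrative):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void my_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_dwork, my_poll);

static void my_poll(struct work_struct *work)
{
	/* ... do the periodic part, then re-arm ... */
	queue_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(500));
}

static void my_retune(void)
{
	/* Re-queues with the new timeout whether or not my_dwork was
	 * already pending, by grabbing PENDING first. */
	mod_delayed_work(system_wq, &my_dwork, msecs_to_jiffies(100));
}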
1839 static void send_mayday(struct work_struct *work) in send_mayday() argument
1841 struct pool_workqueue *pwq = get_work_pwq(work); in send_mayday()
1865 struct work_struct *work; in pool_mayday_timeout() local
1877 list_for_each_entry(work, &pool->worklist, entry) in pool_mayday_timeout()
1878 send_mayday(work); in pool_mayday_timeout()
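send_mayday() and pool_mayday_timeout() only matter for workqueues created with WQ_MEM_RECLAIM: those get a dedicated rescuer thread that the mayday path summons when a pool cannot create new workers. A sketch of requesting that guarantee (names illustrative):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_reclaim_wq;

static int my_init(void)
{
	/* WQ_MEM_RECLAIM reserves a rescuer, so work queued here can
	 * still make forward progress when worker creation stalls under
	 * memory pressure and pool_mayday_timeout() fires. */
	my_reclaim_wq = alloc_workqueue("my_reclaim", WQ_MEM_RECLAIM, 0);
	return my_reclaim_wq ? 0 : -ENOMEM;
}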
1997 static void process_one_work(struct worker *worker, struct work_struct *work) in process_one_work() argument
2001 struct pool_workqueue *pwq = get_work_pwq(work); in process_one_work()
2016 lockdep_copy_map(&lockdep_map, &work->lockdep_map); in process_one_work()
2028 collision = find_worker_executing_work(pool, work); in process_one_work()
2030 move_linked_works(work, &collision->scheduled, NULL); in process_one_work()
2035 debug_work_deactivate(work); in process_one_work()
2036 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work); in process_one_work()
2037 worker->current_work = work; in process_one_work()
2038 worker->current_func = work->func; in process_one_work()
2040 work_color = get_work_color(work); in process_one_work()
2042 list_del_init(&work->entry); in process_one_work()
2069 set_work_pool_and_clear_pending(work, pool->id); in process_one_work()
2075 trace_workqueue_execute_start(work); in process_one_work()
2076 worker->current_func(work); in process_one_work()
2081 trace_workqueue_execute_end(work); in process_one_work()
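process_one_work() invokes the handler as worker->current_func(work), passing only the work_struct pointer, so handlers that need their surrounding object recover it with container_of(), the same pattern work_for_cpu_fn() uses near the end of this listing. A hypothetical driver-side handler:

#include <linux/workqueue.h>

struct my_device {
	int			irq_count;
	struct work_struct	irq_work;
};

static void my_irq_work_fn(struct work_struct *work)
{
	/* Recover the containing object from the pointer that
	 * process_one_work() handed us; a given work item never runs
	 * concurrently with itself, so no extra lock is needed against
	 * other instances of this same handler. */
	struct my_device *dev = container_of(work, struct my_device, irq_work);

	dev->irq_count++;
}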
2134 struct work_struct *work = list_first_entry(&worker->scheduled, in process_scheduled_works() local
2136 process_one_work(worker, work); in process_scheduled_works()
2202 struct work_struct *work = in worker_thread() local
2206 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) { in worker_thread()
2208 process_one_work(worker, work); in worker_thread()
2212 move_linked_works(work, &worker->scheduled, NULL); in worker_thread()
2288 struct work_struct *work, *n; in rescuer_thread() local
2305 list_for_each_entry_safe(work, n, &pool->worklist, entry) in rescuer_thread()
2306 if (get_work_pwq(work) == pwq) in rescuer_thread()
2307 move_linked_works(work, scheduled, &n); in rescuer_thread()
2366 struct work_struct work; member
2371 static void wq_barrier_func(struct work_struct *work) in wq_barrier_func() argument
2373 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); in wq_barrier_func()
2414 INIT_WORK_ONSTACK(&barr->work, wq_barrier_func); in insert_wq_barrier()
2415 __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work)); in insert_wq_barrier()
2434 debug_work_activate(&barr->work); in insert_wq_barrier()
2435 insert_work(pwq, &barr->work, head, in insert_wq_barrier()
2718 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) in start_flush_work() argument
2727 pool = get_work_pool(work); in start_flush_work()
2735 pwq = get_work_pwq(work); in start_flush_work()
2740 worker = find_worker_executing_work(pool, work); in start_flush_work()
2746 insert_wq_barrier(pwq, barr, work, worker); in start_flush_work()
2778 bool flush_work(struct work_struct *work) in flush_work() argument
2782 lock_map_acquire(&work->lockdep_map); in flush_work()
2783 lock_map_release(&work->lockdep_map); in flush_work()
2785 if (start_flush_work(work, &barr)) { in flush_work()
2787 destroy_work_on_stack(&barr.work); in flush_work()
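flush_work() waits by inserting a wq_barrier (see insert_wq_barrier() above) immediately behind the queued or running instance and sleeping on the barrier's completion; it returns true if there was anything to wait for. Continuing the hypothetical my_device sketch from the process_one_work() notes:

#include <linux/printk.h>
#include <linux/workqueue.h>

static void my_quiesce(struct my_device *dev)
{
	/* Waits for the last queued instance of irq_work to finish;
	 * callers that also need to stop requeueing should use
	 * cancel_work_sync() instead. */
	if (flush_work(&dev->irq_work))
		pr_debug("waited for an in-flight irq_work\n");
}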
2797 struct work_struct *work; member
2804 if (cwait->work != key) in cwt_wakefn()
2809 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) in __cancel_work_timer() argument
2816 ret = try_to_grab_pending(work, is_dwork, &flags); in __cancel_work_timer()
2838 cwait.work = work; in __cancel_work_timer()
2842 if (work_is_canceling(work)) in __cancel_work_timer()
2849 mark_work_canceling(work); in __cancel_work_timer()
2852 flush_work(work); in __cancel_work_timer()
2853 clear_work_data(work); in __cancel_work_timer()
2862 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); in __cancel_work_timer()
2885 bool cancel_work_sync(struct work_struct *work) in cancel_work_sync() argument
2887 return __cancel_work_timer(work, false); in cancel_work_sync()
2907 __queue_work(dwork->cpu, dwork->wq, &dwork->work); in flush_delayed_work()
2909 return flush_work(&dwork->work); in flush_delayed_work()
2935 ret = try_to_grab_pending(&dwork->work, true, &flags); in cancel_delayed_work()
2941 set_work_pool_and_clear_pending(&dwork->work, in cancel_delayed_work()
2942 get_work_pool_id(&dwork->work)); in cancel_delayed_work()
2959 return __cancel_work_timer(&dwork->work, true); in cancel_delayed_work_sync()
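cancel_work_sync() and cancel_delayed_work_sync() both land in __cancel_work_timer(), which grabs PENDING via try_to_grab_pending(), marks the item canceling and then flushes any instance that is already running. The _sync variant is the one that matters for self-rearming work such as the hypothetical my_dwork above, because PENDING stays set during the flush and the handler's own requeue attempt is rejected:

#include <linux/workqueue.h>

static void my_stop_polling(void)
{
	/* Guaranteed idle on return: any running my_poll() has finished
	 * and its attempt to re-queue itself was a no-op. */
	cancel_delayed_work_sync(&my_dwork);
}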
2986 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu() local
2988 INIT_WORK(work, func); in schedule_on_each_cpu()
2989 schedule_work_on(cpu, work); in schedule_on_each_cpu()
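schedule_on_each_cpu() above builds one work item per online CPU from a per-cpu allocation, queues each with schedule_work_on() and then flushes them all, so it returns only after every CPU has run the function. Illustrative use (names are hypothetical):

#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void my_percpu_fn(struct work_struct *work)
{
	pr_info("running on CPU %d\n", raw_smp_processor_id());
}

static int my_touch_all_cpus(void)
{
	/* May sleep while waiting for every CPU's instance to finish,
	 * so this is for process context only. */
	return schedule_on_each_cpu(my_percpu_fn);
}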
3045 fn(&ew->work); in execute_in_process_context()
3049 INIT_WORK(&ew->work, fn); in execute_in_process_context()
3050 schedule_work(&ew->work); in execute_in_process_context()
3341 static void pwq_unbound_release_workfn(struct work_struct *work) in pwq_unbound_release_workfn() argument
3343 struct pool_workqueue *pwq = container_of(work, struct pool_workqueue, in pwq_unbound_release_workfn()
4127 unsigned int work_busy(struct work_struct *work) in work_busy() argument
4133 if (work_pending(work)) in work_busy()
4137 pool = get_work_pool(work); in work_busy()
4140 if (find_worker_executing_work(pool, work)) in work_busy()
4235 static void pr_cont_work(bool comma, struct work_struct *work) in pr_cont_work() argument
4237 if (work->func == wq_barrier_func) { in pr_cont_work()
4240 barr = container_of(work, struct wq_barrier, work); in pr_cont_work()
4245 pr_cont("%s %pf", comma ? "," : "", work->func); in pr_cont_work()
4252 struct work_struct *work; in show_pwq() local
4281 list_for_each_entry(work, &worker->scheduled, entry) in show_pwq()
4282 pr_cont_work(false, work); in show_pwq()
4288 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4289 if (get_work_pwq(work) == pwq) { in show_pwq()
4298 list_for_each_entry(work, &pool->worklist, entry) { in show_pwq()
4299 if (get_work_pwq(work) != pwq) in show_pwq()
4302 pr_cont_work(comma, work); in show_pwq()
4303 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
4312 list_for_each_entry(work, &pwq->delayed_works, entry) { in show_pwq()
4313 pr_cont_work(comma, work); in show_pwq()
4314 comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED); in show_pwq()
4402 static void wq_unbind_fn(struct work_struct *work) in wq_unbind_fn() argument
4646 struct work_struct work; member
4652 static void work_for_cpu_fn(struct work_struct *work) in work_for_cpu_fn() argument
4654 struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); in work_for_cpu_fn()
4674 INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); in work_on_cpu()
4675 schedule_work_on(cpu, &wfc.work); in work_on_cpu()
4676 flush_work(&wfc.work); in work_on_cpu()
4677 destroy_work_on_stack(&wfc.work); in work_on_cpu()
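work_on_cpu() wraps the work_for_cpu/work_for_cpu_fn() entries above into a synchronous call: the requested function runs in a worker bound to the target CPU while the caller sleeps in flush_work(), and the function's long return value is handed back. A sketch (the function body and the choice of CPU 3 are illustrative):

#include <linux/workqueue.h>

static long my_probe_fn(void *arg)
{
	/* Executes in a bound kworker on the CPU passed to work_on_cpu(),
	 * e.g. to touch per-CPU state close to that CPU. */
	return 0;
}

static long my_probe_cpu3(void)
{
	/* Queues work_for_cpu_fn() on CPU 3, flushes it, and returns
	 * the value my_probe_fn() stored in wfc.ret. */
	return work_on_cpu(3, my_probe_fn, NULL);
}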