/linux-4.1.27/include/trace/events/
workqueue.h: tracepoints for the workqueue subsystem. workqueue_queue_work fires when a work item gets queued, either immediately or once a delayed work is actually queued on a workqueue (i.e. once the delay has elapsed); it records the work_struct pointer, its function, the workqueue, the requested CPU, and the actual CPU ("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u"). workqueue_activate_work fires when a queued work item is put on the active queue. workqueue_execute_start and workqueue_execute_end bracket execution of the work function; the start event prints "work struct %p: function %pf" and the shared event class prints "work struct %p".
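The fragments above are the standard TRACE_EVENT building blocks (TP_PROTO, TP_ARGS, TP_STRUCT__entry, TP_fast_assign, TP_printk). Reassembled from the visible pieces, workqueue_execute_start looks roughly like this (a reconstruction, not a verbatim copy of the file):

    TRACE_EVENT(workqueue_execute_start,

            TP_PROTO(struct work_struct *work),

            TP_ARGS(work),

            /* what gets recorded in the trace ring buffer */
            TP_STRUCT__entry(
                    __field(void *, work)
                    __field(void *, function)
            ),

            /* how the arguments are copied into the entry */
            TP_fast_assign(
                    __entry->work = work;
                    __entry->function = work->func;
            ),

            TP_printk("work struct %p: function %pf",
                      __entry->work, __entry->function)
    );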
writeback.h: writeback tracepoints. A shared event class takes (struct backing_dev_info *bdi, struct wb_writeback_work *work) and copies the work's parameters into the trace entry: nr_pages, the superblock device (work->sb ? work->sb->s_dev : 0), sync_mode, for_kupdate, range_cyclic, for_background, and reason; a DEFINE_EVENT-style macro stamps out the individual events from that prototype. A further event taking (wb, work, moved) additionally records work->older_than_this and work->reason.
/linux-4.1.27/kernel/
task_work.c: per-task callback lists. task_work_add(task, work, notify) asks @task to execute work->func(): it pushes the callback_head onto task->task_works with a cmpxchg loop and notifies the task if @notify is set; it fails if the task is exiting or exited and thus can't process the work, otherwise work->func() is called when the task returns from kernel mode. task_work_cancel(task, func) finds the last queued pending work with ->func == @func, unlinks it with cmpxchg (if a new entry is pushed in front, the scan simply finds the target again), and returns the found work or NULL. task_work_run() atomically detaches the whole list (installing an exit marker when PF_EXITING is set, so nothing can add new work after it returns), reverses it so callbacks run in queueing order, and invokes each work->func(work); the callbacks themselves may call task_work_add().
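A minimal sketch of the calling convention (hypothetical module code; the callback name and the choice to notify are assumptions, and a callback_head is one-shot, so it must be re-initialized before being added again):

    #include <linux/sched.h>
    #include <linux/task_work.h>

    static void my_callback(struct callback_head *head)
    {
            /* Runs in the target task's context, on its next
             * return from kernel mode. */
            pr_info("task_work ran in %s\n", current->comm);
    }

    static struct callback_head my_cb;

    static int queue_on(struct task_struct *task)
    {
            init_task_work(&my_cb, my_callback);
            /* true: kick the task so it notices the work promptly;
             * fails with -ESRCH if the task is already exiting. */
            return task_work_add(task, &my_cb, true);
    }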
irq_work.c: work items executed from IRQ context. irq_work_claim(work) atomically sets the PENDING (and BUSY) flags with cmpxchg and reports whether the claim succeeded. irq_work_queue_on(work, cpu) enqueues a claimed item on @cpu's raised_list and raises an IPI if the list was previously empty; all work is expected to have been flushed before a CPU goes offline. irq_work_queue(work) enqueues on the current CPU, and items flagged IRQ_WORK_LAZY go on the lazy_list to be handled from the next tick rather than raising an interrupt. irq_work_run_list() clears PENDING with xchg before calling work->func(work), so the handler may requeue its own item, but concurrent claimers must not rely on the running CPU to handle their data; BUSY is dropped afterwards with a final cmpxchg. irq_work_sync(work) busy-waits until the BUSY flag clears.
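A usage sketch (hypothetical names; the init_irq_work()/irq_work_queue() signatures match the irq_work.h excerpt further down):

    #include <linux/irq_work.h>

    static void my_irq_work_fn(struct irq_work *work)
    {
            /* Hard interrupt context: no sleeping, keep it short. */
    }

    static struct irq_work my_irq_work;

    static void setup_and_fire(void)
    {
            init_irq_work(&my_irq_work, my_irq_work_fn);
            if (!irq_work_queue(&my_irq_work))
                    pr_debug("already pending\n");  /* claim failed */

            /* ... on teardown, wait out any in-flight run: */
            irq_work_sync(&my_irq_work);
    }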
workqueue.c: the core workqueue implementation; each CPU has worker pools for normal work items and for high-priority ones, plus extra unbound pools, and the externally visible workqueue relays issued work items to them (with a per-workqueue current work color for flushing). Highlights from the matched lines, in file order:

Debug-objects fixups: work_fixup_init() and work_fixup_free() cancel_work_sync() the item and then re-init or free the debug object; work_fixup_activate() handles statically initialized items (WORK_STRUCT_STATIC_BIT) by initializing and activating them in place rather than treating it as a real fixup. __init_work(), destroy_work_on_stack(), and destroy_delayed_work_on_stack() wrap debug_object_init_on_stack() and friends (the delayed variant also destroys the on-stack timer). get_work_color() extracts the flush color from the work data bits.

work->data encoding: while queued, WORK_STRUCT_PWQ is set and the non-flag bits of work->data point to the pool_workqueue (pwq); once execution starts, the pwq is replaced by the last worker-pool ID, which stays available afterwards so the next queueing can find where the item last ran (the pwq itself is available only while the item is queued). These fields may only be set while the work is owned, i.e. PENDING. WORK_OFFQ_CANCELING marks an off-queue item that is being canceled; while being canceled, the item keeps PENDING set so nobody else can grab it. set_work_data()/set_work_pwq()/set_work_pool_and_keep_pending()/set_work_pool_and_clear_pending() write the word, and get_work_pwq()/get_work_pool()/get_work_pool_id()/clear_work_data()/mark_work_canceling()/work_is_canceling() read or adjust it. set_work_pool_and_clear_pending() carries careful barrier comments: a write barrier pairs with test_and_set_bit(PENDING) so all prior updates to @work are visible, and the comments walk through a race on the same @work where CPU#2 sees PENDING still set inside work->current_func() on CPU#1, declines to queue in the hope that CPU#1 will finish the queued work, while CPU#1 meanwhile does not see the update.
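The on-stack debug hooks above imply the usual stack-allocated work pattern (a sketch; the function names are hypothetical, and the same pattern appears later in this file's own work_on_cpu()):

    static void my_stack_work_fn(struct work_struct *w)
    {
            /* ... */
    }

    static void run_once_on_stack(void)
    {
            struct work_struct w;

            INIT_WORK_ONSTACK(&w, my_stack_work_fn);
            schedule_work(&w);
            flush_work(&w);            /* must finish before the frame dies */
            destroy_work_on_stack(&w); /* pairs with the debugobjects hooks */
    }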
Execution identity and stealing: find_worker_executing_work(pool, work) searches pool->busy_hash, keyed by the work address, for a worker whose current_work and current_func both match. Checking the function as well avoids an unwanted dependency between unrelated executions through a work item being recycled while still running: if a freed item's address is reused for a different item, the new item would otherwise be mistaken for the still-running one and made to wait. (Reusing the address with the same work function can still introduce a dependency onto itself; if somebody wants to shoot oneself in the foot that way, the culprit work function is at least easy to locate.) move_linked_works(work, head, nextp) moves @work plus any consecutive WORK_STRUCT_LINKED followers to @head, optionally reporting the next work after the series. pwq_activate_delayed_work() fires workqueue_activate_work, moves the item onto the pool worklist, and clears WORK_STRUCT_DELAYED_BIT; pwq_dec_nr_in_flight(pwq, color) does the bookkeeping when a work has completed or is removed from the pending queue (uncolored items don't participate in flushing or nr_active). try_to_grab_pending(work, is_dwork, &flags) steals an item from its worklist and returns with irqs disabled: 1 if @work was pending and PENDING was successfully stolen, 0 if it was idle and PENDING was claimed, -EAGAIN to retry, and -ENOENT if someone else is canceling it (a state that may persist). It relies on work->data pointing to the pwq only while the item is queued on pwq->wq, with both transitions made under pwq->pool->lock; this in turn guarantees that data pointing to the pwq of a locked pool means the item really is on that pool. A delayed item is activated first, because it might have linked NO_COLOR items which, if left dangling, would break flush management later and cause a stall. insert_work(pwq, work, head, extra_flags) sets work->data to the pwq, or's in @extra_flags, and appends the entry to @head; is_chained_work() returns true iff the caller is a worker executing an item on @wq. __queue_work(cpu, wq, work) selects the pwq to use, except that an item previously on a different pool that may still be running there is queued on that pool to preserve non-reentrancy (retrying is guaranteed to make forward progress), and it fires trace_workqueue_queue_work plus, when the item goes straight to the worklist, trace_workqueue_activate_work. queue_work_on(cpu, wq, work) wraps all of this behind test_and_set_bit(PENDING) and returns false if @work was already on a queue; the caller must ensure the target CPU can't go away.
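A sketch of the public queueing entry point (hypothetical names; pinning to CPU 0 is arbitrary, and system_wq is the default global workqueue):

    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *w)
    {
            /* process context; may sleep */
    }

    static DECLARE_WORK(my_work, my_work_fn);

    static void kick(void)
    {
            /* Returns false if PENDING was already set. */
            if (!queue_work_on(0, system_wq, &my_work))
                    pr_debug("my_work already queued\n");
    }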
Delayed work and maydays: delayed_work_timer_fn() queues dwork->work on the recorded CPU and workqueue when the timer fires. __queue_delayed_work() warns if the work is still on a list and queues @dwork->work immediately when @delay is 0; queue_delayed_work_on(cpu, wq, dwork, delay) guards it with test_and_set_bit(PENDING) and returns false if @work was already on a queue, while mod_delayed_work_on() instead loops on try_to_grab_pending() so an already-armed timer can be re-aimed (with @delay zero, the work is guaranteed to be scheduled immediately regardless of its current state). send_mayday(work) and the pool_mayday_timeout() timer walk a starving pool's worklist and summon rescuers for work items whose workqueue has one. manage_workers() makes sure the pool has workers so anyone can proceed to executing work items. process_one_work(worker, work) contains all the logic necessary to process a single work, including synchronization against other workers: a single work shouldn't be executed concurrently, so a collision (a worker already processing the same item, found via find_worker_executing_work()) gets the work deferred onto its scheduled list with move_linked_works(); otherwise the item is recorded in pool->busy_hash and worker->current_work/current_func, its color is taken, it is removed from the list, and set_work_pool_and_clear_pending() runs inside pool->lock so a concurrent queueing sees the update. The function itself runs between trace_workqueue_execute_start() and trace_workqueue_execute_end(), through a copy of work->lockdep_map, because "work" must not be used after the call (the item may free itself, and a requeueing work item waiting for something could otherwise deadlock with stop_machine). process_scheduled_works() repeatedly fetches a work from the top of worker->scheduled and executes it, since the list may change while processing a work.
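Delayed-work usage, as a sketch (hypothetical names; DECLARE_DELAYED_WORK and the wrappers come from workqueue.h):

    static void my_dwork_fn(struct work_struct *w)
    {
            /* runs after the programmed delay */
    }

    static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);

    static void arm(void)
    {
            queue_delayed_work(system_wq, &my_dwork, HZ);    /* in ~1s  */
            mod_delayed_work(system_wq, &my_dwork, 2 * HZ);  /* now ~2s */
    }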
Worker loops and flushing: worker_thread() pops the first pending work and, when it has no LINKED follower, calls process_one_work() on it directly; otherwise it moves the whole linked series to its scheduled list and drains that, sleeping once pool->lock is held with no work to process. Regular work processing on a pool may block trying to create a new worker; rescuer_thread() therefore services wq->maydays, which may hold entries even when no work is pending (the pool may have executed everything before the rescuer got to them). It migrates matching work items from the pool's worklist to its scheduled list with move_linked_works() and keeps processing, because rescued work items can requeue back-to-back. Flushing is built on barrier works: struct wq_barrier wraps a work_struct and a completion, wq_barrier_func() completes it, and insert_wq_barrier() creates the barrier with INIT_WORK_ONSTACK(), force-sets its PENDING bit, and inserts it after the target work. The LINKED flag is maintained on the previous work because try_to_grab_pending() can't otherwise determine whether the work to be grabbed is safe to remove, and a work with LINKED set must always be followed by a valid next work. flush_workqueue() sleeps until all work items queued on entry have run to completion, tracked by flush colors (a work color < 0 means no-op); draining additionally tolerates work items on @wq queueing further work items on it. start_flush_work(work, &barr) locates the pool and either the pwq the item is queued on or the worker executing it, inserts the barrier there, and notes that flushing another work from a work function needs care when max_active is 1 or a rescuer is in use. flush_work(work) waits until the last queueing instance of @work finishes, guaranteeing the item is idle on return; it returns true if it actually waited, and it acquires/releases work->lockdep_map up front to catch deadlocks.
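Waiting for a single item, continuing the hypothetical my_work above:

    static void quiesce(void)
    {
            /* true: we actually waited for a queued or running instance */
            if (flush_work(&my_work))
                    pr_debug("my_work was busy and has now finished\n");
    }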
flush_work() doesn't work for PREEMPT_NONE __cancel_work_timer() 2820 * because we may get scheduled between @work's completion __cancel_work_timer() 2823 * as @work is no longer busy, try_to_grab_pending() will __cancel_work_timer() 2824 * return -ENOENT as @work is still being canceled and the __cancel_work_timer() 2830 * wake function which matches @work along with exclusive __cancel_work_timer() 2838 cwait.work = work; __cancel_work_timer() 2842 if (work_is_canceling(work)) __cancel_work_timer() 2848 /* tell other tasks trying to grab @work to back off */ __cancel_work_timer() 2849 mark_work_canceling(work); __cancel_work_timer() 2852 flush_work(work); __cancel_work_timer() 2853 clear_work_data(work); __cancel_work_timer() 2862 __wake_up(&cancel_waitq, TASK_NORMAL, 1, work); __cancel_work_timer() 2868 * cancel_work_sync - cancel a work and wait for it to finish 2869 * @work: the work to cancel 2871 * Cancel @work and wait for its execution to finish. This function 2872 * can be used even if the work re-queues itself or migrates to 2873 * another workqueue. On return from this function, @work is 2876 * cancel_work_sync(&delayed_work->work) must not be used for 2879 * The caller must ensure that the workqueue on which @work was last 2883 * %true if @work was pending, %false otherwise. 2885 bool cancel_work_sync(struct work_struct *work) cancel_work_sync() argument 2887 return __cancel_work_timer(work, false); cancel_work_sync() 2893 * @dwork: the delayed work to flush 2895 * Delayed timer is cancelled and the pending work is queued for 2900 * %true if flush_work() waited for the work to finish execution, 2907 __queue_work(dwork->cpu, dwork->wq, &dwork->work); flush_delayed_work() 2909 return flush_work(&dwork->work); flush_delayed_work() 2914 * cancel_delayed_work - cancel a delayed work 2923 * The work callback function may still be running on return, unless 2924 * it returns %true and the work doesn't re-arm itself. Explicitly flush or 2935 ret = try_to_grab_pending(&dwork->work, true, &flags); cancel_delayed_work() 2941 set_work_pool_and_clear_pending(&dwork->work, cancel_delayed_work() 2942 get_work_pool_id(&dwork->work)); cancel_delayed_work() 2949 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 2950 * @dwork: the delayed work cancel 2959 return __cancel_work_timer(&dwork->work, true); cancel_delayed_work_sync() 2986 struct work_struct *work = per_cpu_ptr(works, cpu); for_each_online_cpu() local 2988 INIT_WORK(work, func); for_each_online_cpu() 2989 schedule_work_on(cpu, work); for_each_online_cpu() 3001 * flush_scheduled_work - ensure that any scheduled work has run to completion. 3010 * One of the work items currently on the workqueue needs to acquire 3013 * Your code is running in the context of a work routine. 3016 * occur very often. It depends on what work items are on the workqueue and 3020 * need to know that a particular work item isn't queued and isn't running. 
Miscellany: execute_in_process_context(fn, ew) calls fn(&ew->work) directly when already in process context and otherwise INIT_WORKs and schedule_work()s it; @ew is guaranteed storage for the execute work structure and must stay available until the work executes. pwq_unbound_release_workfn() is the work item that releases unbound pool_workqueues; a workqueue's saved_max_active is restored, and delayed work items activated, when thawing. When unbound attributes are applied, pwqs are created covering the possible CPUs in attrs->cpumask so that work items stay affine to the NUMA node they were issued on, and older pwqs are released as their in-flight work items finish; a work item that repeatedly requeues itself keeps running on its old pwq, workers already executing the workqueue's items lose their CPU affinity, and it's the user's responsibility to flush such items if that matters. destroy_workqueue() safely destroys a workqueue; all work currently pending is done first. A helper lets work functions determine whether they are being run off the rescuer task. work_busy(work) tests whether a work is currently pending or running; there is no synchronization around it, so the answer is only advisory (it checks work_pending(), then looks up the pool and calls find_worker_executing_work()). set_worker_desc() can be called by a running work function to describe what the work item is about; if the worker task gets dumped, that description is printed along with the workqueue name set by the currently executing work item. pr_cont_work() and show_pwq() dump a pool_workqueue for debugging: in-flight workers with their scheduled lists, queued items on the pool worklist belonging to the pwq, delayed items, and wq_barrier items recognized by wq_barrier_func. wq_unbind_fn() runs on CPU offline; because there are a lot of assumptions about strong associations among work, pwq, and pool, the pool's workers are unbound so currently pending work items continue as an unbound chain of execution.
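Creating and destroying a private workqueue, as a sketch (names hypothetical; flags and the max_active meaning follow the workqueue.h notes below):

    static struct workqueue_struct *my_wq;

    static int my_wq_init(void)
    {
            my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0); /* 0: default max_active */
            if (!my_wq)
                    return -ENOMEM;
            queue_work(my_wq, &my_work);
            return 0;
    }

    static void my_wq_exit(void)
    {
            destroy_workqueue(my_wq);  /* completes all pending work first */
    }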
CPU hotplug rebinding kicks all idle workers so that they migrate back to the associated CPU (for_each_pool_worker()). struct work_for_cpu bundles a work_struct, a function, its argument, and the long return value; work_for_cpu_fn() unpacks it with container_of(), and work_on_cpu(cpu, fn, arg) runs @fn synchronously on @cpu using the on-stack pattern: INIT_WORK_ONSTACK(), schedule_work_on(), flush_work(), destroy_work_on_stack(). Sysfs exposes max_active as a read-write int, the maximum number of in-flight work items.
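work_on_cpu() usage sketch (hypothetical probe function; the call sleeps until the function has run on the target CPU):

    #include <linux/smp.h>

    static long probe_fn(void *arg)
    {
            return raw_smp_processor_id();  /* runs bound to the target CPU */
    }

    static void demo(void)
    {
            long cpu = work_on_cpu(1, probe_fn, NULL);

            pr_info("probe ran on CPU %ld\n", cpu);
    }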
kthread.c: the simple kthread_worker processor. kthread_worker_fn() is the loop a dedicated kthread runs: it pops the first kthread_work off worker->work_list, records it as worker->current_work, and calls work->func(work). insert_kthread_work(worker, work, pos) links @work before @pos and sets work->worker. queue_kthread_work(worker, work) queues @work to the worker for async execution, returning true if it was successfully queued and false if it was already pending (non-empty node). Flushing is done by queueing a kthread_flush_work whose function completes a completion: flush_kthread_work(work) inserts it right after @work if @work is queued, or at the head of the list if @work is currently executing (re-checking work->worker in case the item moved), then waits; flush_kthread_worker() appends the flush work at the tail, draining everything queued so far.
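Putting the kthread_worker pieces together, a lifecycle sketch (hypothetical names; DEFINE_KTHREAD_WORK() and init_kthread_worker() come from the kthread.h excerpt below):

    #include <linux/kthread.h>

    static void my_kwork_fn(struct kthread_work *work)
    {
            /* runs in the dedicated kthread; may sleep */
    }

    static DEFINE_KTHREAD_WORK(my_kwork, my_kwork_fn);
    static struct kthread_worker my_worker;
    static struct task_struct *my_task;

    static int start(void)
    {
            init_kthread_worker(&my_worker);
            my_task = kthread_run(kthread_worker_fn, &my_worker, "my_kworker");
            if (IS_ERR(my_task))
                    return PTR_ERR(my_task);

            queue_kthread_work(&my_worker, &my_kwork);
            flush_kthread_work(&my_kwork);     /* wait for this item */
            flush_kthread_worker(&my_worker);  /* drain everything   */
            kthread_stop(my_task);
            return 0;
    }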
stop_machine.c: the per-CPU stopper machinery. cpu_stop_queue_work(cpu, work) appends a cpu_stop_work to the stopper's list, or completes it immediately as failed if the stopper is offline. stop_one_cpu(cpu, fn, arg) builds an on-stack cpu_stop_work { .fn, .arg, .done } around a cpu_stop_done and waits for it. queue_stop_cpus_work() fills the per-cpu stop_cpus_work entries (fn, arg, done) for each requested CPU. cpu_stopper_thread() pops works off the list, runs fn(arg), and signals the done structure; cpu_stop_park() fails all still-queued works when the stopper is parked. stop_machine_from_inactive_cpu() schedules the work on the other CPUs and executes directly on the local CPU.
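Calling into the stopper, a sketch (hypothetical function; the callback runs on the target CPU with everything else preempted, so it must be brief):

    #include <linux/stop_machine.h>

    static int my_stop_fn(void *arg)
    {
            /* runs on the target CPU, monopolizing it */
            return 0;
    }

    static void demo(void)
    {
            int ret = stop_one_cpu(2, my_stop_fn, NULL); /* sleeps until done */

            if (ret)
                    pr_warn("stop_one_cpu failed: %d\n", ret);
    }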
/linux-4.1.27/drivers/gpu/drm/
drm_flip_work.c: helpers to run work from workqueue context after a page flip or vblank. drm_flip_work_allocate_task() allocates a flip-work task; drm_flip_work_queue_task(work, task) appends it to work->queued under work->lock, to be run (passed back to the drm_flip_func_t) after drm_flip_work_commit() is called. drm_flip_work_queue(work, val) allocates a task carrying @val and queues it; if allocation fails, it logs "%s could not allocate task!" and falls back to calling work->func(work, val) synchronously. drm_flip_work_commit(work, wq) splices queued onto commited and queue_work()s the backing worker on @wq; typical usage queues tasks before the flip and commits them from the vblank irq. flip_worker() splices commited onto a local list and calls work->func(work, task->data) for each task. drm_flip_work_init(work, name, func) initializes the lists, lock, and worker, and drm_flip_work_cleanup() WARNs if anything is still queued or commited.
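Typical driver usage, sketched (hypothetical names; a common real use is deferring framebuffer unrefs, and system_wq stands in for the driver's own workqueue):

    #include <drm/drm_flip_work.h>

    static void unref_worker(struct drm_flip_work *work, void *val)
    {
            /* drop a reference in process context, e.g. on an old fb */
    }

    static struct drm_flip_work unref_work;

    static void setup(void)
    {
            drm_flip_work_init(&unref_work, "unref", unref_worker);
    }

    static void before_flip(void *old_fb)
    {
            drm_flip_work_queue(&unref_work, old_fb);
    }

    static void on_vblank(void)
    {
            drm_flip_work_commit(&unref_work, system_wq);
    }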
/linux-4.1.27/virt/kvm/
async_pf.c: KVM asynchronous page faults. kvm_async_page_present_sync() and kvm_async_page_present_async() both take (vcpu, work) and forward to kvm_arch_async_page_present(), gated by the sync/async configuration. async_pf_execute() is the work function, recovering the kvm_async_pf with container_of(work, struct kvm_async_pf, work). kvm_clear_async_pf_completion_queue() cancels outstanding work-queue items: each queued work is either flushed or cancel_work_sync()ed, and when the cancel wins, the function itself drops the mm reference (mmput) and the kvm reference (work->vcpu->kvm == vcpu->kvm) and frees the item; whatever remains on the done list is unlinked and freed. kvm_check_async_pf_completion() pops the first done item, calls kvm_arch_async_page_ready() and the page-present hook, unlinks it from the vcpu queue, and frees it. kvm_setup_async_pf() allocates a kvm_async_pf from async_pf_cache with GFP_NOWAIT | __GFP_NOWARN and fills in wakeup_all = false, vcpu, gva, addr (the hva), arch, and mm, taking an mm_users reference and a kvm reference; it bails out if the hva is an error value, INIT_WORKs the item with async_pf_execute, schedule_work()s it, links it on vcpu->async_pf.queue, and calls kvm_arch_async_page_not_present(). The error path drops both references and frees the item. kvm_async_pf_wakeup_all() queues a wakeup_all marker (allocated GFP_ATOMIC, its queue head initialized just so list_del works) on the done list.
/linux-4.1.27/sound/isa/msnd/
msnd_classic.c: a single comment line: /* The work is in msnd_pinnacle.c, just define MSND_CLASSIC before it. */
/linux-4.1.27/sound/oss/
msnd_classic.c: the same single comment line as the ISA copy above: /* The work is in msnd_pinnacle.c, just define MSND_CLASSIC before it. */
/linux-4.1.27/fs/btrfs/
async-thread.c: btrfs workqueues with ordered completion. A btrfs_work carries func, ordered_func, and ordered_free callbacks plus an ordered_list head pointing into the workqueue's ordered list. Macro-generated wrappers (each recovers the btrfs_work with container_of() and calls normal_work_helper()) give every work type a distinct work_struct function, and a threshold hook can run before executing the work. run_ordered_work() walks the ordered list in queueing order: it stops at the first entry whose WORK_DONE_BIT is not yet set, leaving that entry on the list as a barrier so later work items that are done don't have their ordered_func run early; for finished entries it test_and_set()s WORK_ORDER_DONE_BIT, fires the trace event, runs ordered_func, unlinks the entry, and calls ordered_free last. normal_work_helper() is careful about lifetime: it must not touch things inside the work after work->func() when there is no ordered_free (the struct is freed inside func), nor after setting WORK_DONE_BIT (a waiter may free it almost instantly), so it samples whether ordering is needed and saves work->wq up front, then runs func, sets WORK_DONE_BIT, and kicks ordered processing. btrfs_init_work(work, uniq_func, func, ordered_func, ordered_free) initializes an item (INIT_WORK with the uniq wrapper, empty ordered_list, zeroed flags). __btrfs_queue_work() records the wq, appends to the ordered list when ordered_func is set, fires the queued trace event, and queue_work()s the normal workqueue; btrfs_queue_work(wq, work) routes to the high-priority queue when WORK_HIGH_PRIO_BIT is set (see btrfs_set_work_high_priority()) and the wq has one.
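A rough calling-pattern sketch. Everything named my_* is hypothetical, and my_helper stands for a wrapper assumed to be declared through the helper macros in async-thread.h that give each work type its own work_struct function; the target workqueue would be one of the fs_info workqueues:

    #include <linux/slab.h>
    #include "async-thread.h"

    struct my_ctx {
            struct btrfs_work work;
            /* ... payload ... */
    };

    static void my_func(struct btrfs_work *work)
    {
            /* main body; may run on any worker, in any order */
    }

    static void my_ordered(struct btrfs_work *work)
    {
            /* runs strictly in queueing order, once my_func is done */
    }

    static void my_free(struct btrfs_work *work)
    {
            kfree(container_of(work, struct my_ctx, work));
    }

    static void submit(struct btrfs_fs_info *fs_info, struct my_ctx *ctx)
    {
            btrfs_init_work(&ctx->work, my_helper, my_func,
                            my_ordered, my_free);
            btrfs_queue_work(fs_info->workers, &ctx->work);
    }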
async-thread.h: declares btrfs_init_work(work, helper, func, ordered_func, ordered_free), btrfs_queue_work(wq, work), and btrfs_set_work_high_priority(work).
/linux-4.1.27/drivers/staging/octeon/
ethernet-rx.c: the Octeon receive path, driven by hardware work-queue entries (cvmx_wqe_t; @work is the work queue entry pointing to the packet). cvm_oct_check_rcv_error(work) inspects work->word2.snoip.err_code: code 10 with work->len <= 64 is tolerated as a minimum-size-frame length error; codes 5 and 7, with the preamble workaround enabled, cause the packet bytes at work->packet_ptr to be scanned so that work->packet_ptr.s.addr and work->len can be adjusted to skip a corrupted preamble, and if no valid frame is found the packet is dumped with cvmx_helper_dump_packet() and freed with cvm_oct_free_work(). Any other error code is logged with the port (work->ipprt) and code, and the work is freed. cvm_oct_napi_poll() accepts only work for its own group (preserving priorities), fetches entries with cvmx_pow_work_response_async() or cvmx_pow_work_request_sync(), prefetches them, and stops when none is left. If the packet occupies exactly one buffer (work->word2.s.bufs == 1), the sk_buff that shares the buffer is recovered via cvm_oct_get_buffer_ptr() and reused in place, adjusting skb->data and skb->len from the work fields. Otherwise dev_alloc_skb(work->len) allocates a fresh skb (freeing the work on failure), filled either from work->packet_data when the packet is entirely stored in the work entry (bufs == 0, with the starting offset depending on not_IP and is_v6) or by walking the work->word2.s.bufs buffer segments starting from work->packet_ptr. For valid ports the skb goes to cvm_oct_device[work->ipprt]; hardware checksums are honored unless not_IP, IP_exc, L4_error, or !tcp_or_udp is set. Finally, if the skbuff and the work share the same buffer, the work is freed straight back to CVMX_FPA_WQE_POOL with cvmx_fpa_free(); otherwise cvm_oct_free_work() is used.
ethernet-tx.c: cvm_oct_xmit_pow() transmits by handing the packet to the POW. It allocates a work-queue entry from CVMX_FPA_WQE_POOL (rate-limited "Failed to allocate a work queue entry" and drop on failure, freeing the work again if the packet buffer can't be obtained), copies the packet into a packet-pool buffer, and fills in the work fields: hw_chksum from skb->csum, len, ipprt (the port), qos (port & 0x7), grp and tag from pow_send_group (tag marked FIXME), tag_type, and packet_ptr (addr, pool, size, back). For IP packets it sets word2.s.ip_offset = 14 and derives tcp_or_udp, is_frag, IP_exc = 0, and is_bcast/is_mcast from skb->pkt_type, leaving several fields as explicit FIXMEs (vlan_valid, vlan_cfi, vlan_id, dec_ipcomp), and copies skb->data + 10 into work->packet_data. For non-IP packets it fills word2.snoip instead (is_rarp/is_arp from skb->protocol, not_IP = 1, the vlan fields again FIXME, software/rcv_error/err_code zeroed as hardware would) and copies the start of the packet into work->packet_data. The entry is submitted with cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos, work->grp). The TX cleanup watchdog does its work in a tasklet.
/linux-4.1.27/include/linux/
irq_work.h: init_irq_work(work, func) zeroes work->flags and sets work->func; declares irq_work_queue(work), irq_work_queue_on(work, cpu), and irq_work_sync(work).
completion.h: COMPLETION_INITIALIZER(work) initializes the done count to 0 and the embedded wait-queue head; COMPLETION_INITIALIZER_ONSTACK(work) runs init_completion() inside a statement expression. DECLARE_COMPLETION(work) defines and initializes a completion in one go, and DECLARE_COMPLETION_ONSTACK(work) is the variant for automatic variables (needed for lockdep; without that support it falls back to plain DECLARE_COMPLETION).
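The basic signal/wait pairing, as a sketch (hypothetical names):

    #include <linux/completion.h>

    static DECLARE_COMPLETION(setup_done);

    static int waiter(void *unused)
    {
            wait_for_completion(&setup_done);  /* sleeps until complete() */
            return 0;
    }

    static void signaler(void)
    {
            /* ... finish the setup the waiter depends on ... */
            complete(&setup_done);
    }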
kthread.h: the kthread_work half of the simple kthread-based work processor. kthread_work_func_t takes a struct kthread_work *. KTHREAD_WORK_INIT(work, fn) provides a static initializer (node list head plus fn), DEFINE_KTHREAD_WORK(work, fn) defines an initialized item, and init_kthread_work(work, fn) does the same at runtime by zeroing the struct, initializing node, and assigning func. Declares queue_kthread_work(worker, work) and flush_kthread_work(work).
H A D | workqueue.h | 2 * workqueue.h --- work queue handling for Linux. 19 typedef void (*work_func_t)(struct work_struct *work); 23 * The first word is the work queue pointer and the flags rolled into 26 #define work_data_bits(work) ((unsigned long *)(&(work)->data)) 29 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ 30 WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ 32 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ 77 * When a work item is off queue, its high bits point to the last 114 struct work_struct work; member in struct:delayed_work 117 /* target workqueue and CPU ->timer uses to queue ->work */ 136 static inline struct delayed_work *to_delayed_work(struct work_struct *work) to_delayed_work() argument 138 return container_of(work, struct delayed_work, work); to_delayed_work() 142 struct work_struct work; member in struct:execute_work 165 .work = __WORK_INITIALIZER((n).work, (f)), \ 181 extern void __init_work(struct work_struct *work, int onstack); 182 extern void destroy_work_on_stack(struct work_struct *work); 183 extern void destroy_delayed_work_on_stack(struct delayed_work *work); work_static() 184 static inline unsigned int work_static(struct work_struct *work) work_static() argument 186 return *work_data_bits(work) & WORK_STRUCT_STATIC; work_static() 189 static inline void __init_work(struct work_struct *work, int onstack) { } destroy_work_on_stack() argument 190 static inline void destroy_work_on_stack(struct work_struct *work) { } destroy_delayed_work_on_stack() argument 191 static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { } work_static() argument 192 static inline unsigned int work_static(struct work_struct *work) { return 0; } work_static() argument 196 * initialize all of a work item in one go work_static() 199 * assignment of the work data initializer allows the compiler work_static() 231 INIT_WORK(&(_work)->work, (_func)); \ 239 INIT_WORK_ONSTACK(&(_work)->work, (_func)); \ 259 * work_pending - Find out whether a work item is currently pending 260 * @work: The work item in question 262 #define work_pending(work) \ 263 test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) 266 * delayed_work_pending - Find out whether a delayable work item is currently 268 * @work: The work item in question 271 work_pending(&(w)->work) 294 * however, for example, a per-cpu work item scheduled from an 296 * excute the work item on that CPU breaking the idleness, which in 332 * system_highpri_wq is similar to system_wq but for work items which 368 * @max_active: max in-flight work items, 0 for default 404 * most one work item at any given time in the queued order. They are 429 struct work_struct *work); 431 struct delayed_work *work, unsigned long delay); 443 extern bool flush_work(struct work_struct *work); 444 extern bool cancel_work_sync(struct work_struct *work); 454 extern unsigned int work_busy(struct work_struct *work); 460 * queue_work - queue work on a workqueue 462 * @work: work to queue 464 * Returns %false if @work was already on a queue, %true otherwise. 
466 * We queue the work to the CPU on which it was submitted, but if the CPU dies 470 struct work_struct *work) queue_work() 472 return queue_work_on(WORK_CPU_UNBOUND, wq, work); queue_work() 476 * queue_delayed_work - queue work on a workqueue after delay 478 * @dwork: delayable work to queue 491 * mod_delayed_work - modify delay of or queue a delayed work 493 * @dwork: work to queue 506 * schedule_work_on - put work task on a specific cpu 507 * @cpu: cpu to put the work task on 508 * @work: job to be done 512 static inline bool schedule_work_on(int cpu, struct work_struct *work) schedule_work_on() argument 514 return queue_work_on(cpu, system_wq, work); schedule_work_on() 518 * schedule_work - put work task in global workqueue 519 * @work: job to be done 521 * Returns %false if @work was already on the kernel-global workqueue and 528 static inline bool schedule_work(struct work_struct *work) schedule_work() argument 530 return queue_work(system_wq, work); schedule_work() 534 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 549 * schedule_delayed_work - put work task in global workqueue after delay 469 queue_work(struct workqueue_struct *wq, struct work_struct *work) queue_work() argument
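The pattern these declarations support is the same throughout the drivers indexed below: embed a work_struct in the device structure, INIT_WORK() it once at probe time, schedule_work() from atomic context, recover the container with container_of() in the handler, and cancel_work_sync() before the structure dies. A minimal sketch (my_* names hypothetical):

#include <linux/workqueue.h>
#include <linux/printk.h>

struct my_dev {
	struct work_struct event_work;
	int last_event;
};

static void my_event_workfn(struct work_struct *work)
{
	/* the work item is embedded, so container_of() finds the device */
	struct my_dev *dev = container_of(work, struct my_dev, event_work);

	pr_info("handling event %d in process context\n", dev->last_event);
}

static void my_probe(struct my_dev *dev)
{
	INIT_WORK(&dev->event_work, my_event_workfn);
}

static void my_interrupt(struct my_dev *dev, int event)
{
	dev->last_event = event;
	schedule_work(&dev->event_work);	/* safe from hard-irq context */
}

static void my_remove(struct my_dev *dev)
{
	cancel_work_sync(&dev->event_work);	/* waits for a running instance */
}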
|
H A D | i2c-pca-platform.h | 5 int gpio; /* pin to reset chip. driver will work when
|
H A D | jump_label_ratelimit.h | 11 struct delayed_work work; member in struct:static_key_deferred
|
H A D | inet.h | 4 * This work is derived from NET2Debugged, which is in turn derived 8 * This work was derived from Ross Biro's inspirational work
|
H A D | stop_machine.h | 42 struct work_struct work; member in struct:cpu_stop_work 57 static void stop_one_cpu_nowait_workfn(struct work_struct *work) stop_one_cpu_nowait_workfn() argument 60 container_of(work, struct cpu_stop_work, work); stop_one_cpu_nowait_workfn() 71 INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn); stop_one_cpu_nowait() 74 schedule_work(&work_buf->work); stop_one_cpu_nowait()
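stop_one_cpu_nowait() above initializes a caller-provided work buffer on every call; for work items that genuinely live on the stack, workqueue.h pairs INIT_WORK_ONSTACK() with destroy_work_on_stack() so object debugging knows the item's lifetime. A minimal sketch of that pairing (function names hypothetical):

#include <linux/workqueue.h>

static void my_stack_workfn(struct work_struct *work)
{
	/* runs in workqueue context while the caller waits in flush_work() */
}

static void my_run_on_stack_work(void)
{
	struct work_struct work;

	INIT_WORK_ONSTACK(&work, my_stack_workfn);
	schedule_work(&work);
	flush_work(&work);		/* item must be idle before the frame dies */
	destroy_work_on_stack(&work);	/* tells debugobjects the object is gone */
}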
|
H A D | padata.h | 69 * @work: work struct for serialization. 74 struct work_struct work; member in struct:padata_serial_queue 84 * @pwork: work struct for parallelization. 85 * @swork: work struct for serialization. 87 * @work: work struct for parallelization. 95 struct work_struct work; member in struct:padata_parallel_queue
|
/linux-4.1.27/include/drm/ |
H A D | drm_flip_work.h | 34 * Util to queue up work to run from work-queue context after flip/vblank. 46 * @work: the flip work 49 * Callback function to be called for each of the queued work items after 52 typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); 55 * struct drm_flip_task - flip work task 57 * @data: data to pass to work->func 65 * struct drm_flip_work - flip work queue 83 void drm_flip_work_queue_task(struct drm_flip_work *work, 85 void drm_flip_work_queue(struct drm_flip_work *work, void *val); 86 void drm_flip_work_commit(struct drm_flip_work *work, 88 void drm_flip_work_init(struct drm_flip_work *work, 90 void drm_flip_work_cleanup(struct drm_flip_work *work);
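A hedged usage sketch for the helpers above, assuming the list-based flavor of this header in which drm_flip_work_init() takes (work, name, func) and drm_flip_work_commit() takes the workqueue to run on; the CRTC structure and callback names are hypothetical:

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>

struct my_crtc {
	struct drm_flip_work unref_work;
};

static void my_unref_fb(struct drm_flip_work *work, void *val)
{
	/* runs from workqueue context once the flip has been committed */
	drm_framebuffer_unreference(val);
}

static void my_crtc_init(struct my_crtc *c)
{
	drm_flip_work_init(&c->unref_work, "fb unref", my_unref_fb);
}

static void my_page_flip(struct my_crtc *c, struct drm_framebuffer *old_fb)
{
	drm_flip_work_queue(&c->unref_work, old_fb);	/* defer the unref */
}

static void my_vblank_irq(struct my_crtc *c)
{
	/* the flip landed: let queued items run on an unbound workqueue */
	drm_flip_work_commit(&c->unref_work, system_unbound_wq);
}

static void my_crtc_fini(struct my_crtc *c)
{
	drm_flip_work_cleanup(&c->unref_work);
}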
|
/linux-4.1.27/drivers/staging/unisys/visorutil/ |
H A D | periodic_work.c | 19 * Helper functions to schedule periodic work in Linux kernel mode. 29 struct delayed_work work; member in struct:periodic_work 39 static void periodic_work_func(struct work_struct *work) periodic_work_func() argument 43 pw = container_of(work, struct periodic_work, work.work); periodic_work_func() 75 /** Call this from your periodic work worker function to schedule the next 78 * periodic work is no longer scheduled 90 } else if (queue_delayed_work(pw->workqueue, &pw->work, visor_periodic_work_nextperiod() 103 /** This function returns TRUE iff new periodic work was actually started. 104 * If this function returns FALSE, then no work was started 120 INIT_DELAYED_WORK(&pw->work, &periodic_work_func); visor_periodic_work_start() 121 if (queue_delayed_work(pw->workqueue, &pw->work, visor_periodic_work_start() 135 * work. 142 * same workqueue as the work you are trying to stop might be running 143 * on! If you violate this rule, visor_periodic_work_stop() MIGHT work, 145 * "waiting for delayed work...". This will happen if the delayed work 161 * You must NOT own any locks that are needed by the periodic work 163 * because stopping the periodic work often involves waiting for the last 164 * iteration of the periodic work function to complete. Again, if you hit 166 * "waiting for delayed work...". 176 if (cancel_delayed_work(&pw->work)) { visor_periodic_work_stop() 177 /* We get here if the delayed work was pending as visor_periodic_work_stop() 178 * delayed work, but was NOT run. visor_periodic_work_stop() 183 /* If we get here, either the delayed work: visor_periodic_work_stop()
|
/linux-4.1.27/drivers/net/wireless/cw1200/ |
H A D | sta.h | 61 void cw1200_event_handler(struct work_struct *work); 62 void cw1200_bss_loss_work(struct work_struct *work); 63 void cw1200_bss_params_work(struct work_struct *work); 64 void cw1200_keep_alive_work(struct work_struct *work); 65 void cw1200_tx_failure_work(struct work_struct *work); 81 void cw1200_join_timeout(struct work_struct *work); 82 void cw1200_unjoin_work(struct work_struct *work); 83 void cw1200_join_complete_work(struct work_struct *work); 84 void cw1200_wep_key_work(struct work_struct *work); 87 void cw1200_update_filtering_work(struct work_struct *work); 88 void cw1200_set_beacon_wakeup_period_work(struct work_struct *work); 93 void cw1200_ba_work(struct work_struct *work); 118 void cw1200_set_tim_work(struct work_struct *work); 119 void cw1200_set_cts_work(struct work_struct *work); 120 void cw1200_multicast_start_work(struct work_struct *work); 121 void cw1200_multicast_stop_work(struct work_struct *work);
|
H A D | scan.h | 26 struct work_struct work; member in struct:cw1200_scan 45 void cw1200_scan_work(struct work_struct *work); 46 void cw1200_scan_timeout(struct work_struct *work); 47 void cw1200_clear_recent_scan_work(struct work_struct *work); 54 void cw1200_probe_work(struct work_struct *work);
|
H A D | txrx.h | 56 void tx_policy_upload_work(struct work_struct *work); 85 void cw1200_tx_timeout(struct work_struct *work); 96 void cw1200_link_id_reset(struct work_struct *work); 102 void cw1200_link_id_work(struct work_struct *work); 103 void cw1200_link_id_gc_work(struct work_struct *work);
|
H A D | scan.c | 127 queue_work(priv->workqueue, &priv->scan.work); cw1200_hw_scan() 131 void cw1200_scan_work(struct work_struct *work) cw1200_scan_work() argument 133 struct cw1200_common *priv = container_of(work, struct cw1200_common, cw1200_scan_work() 134 scan.work); cw1200_scan_work() 150 cw1200_join_timeout(&priv->join_timeout.work); cw1200_scan_work() 264 queue_work(priv->workqueue, &priv->scan.work); cw1200_scan_work() 297 cw1200_scan_work(&priv->scan.work); cw1200_scan_complete() 327 void cw1200_clear_recent_scan_work(struct work_struct *work) cw1200_clear_recent_scan_work() argument 330 container_of(work, struct cw1200_common, cw1200_clear_recent_scan_work() 331 clear_recent_scan_work.work); cw1200_clear_recent_scan_work() 335 void cw1200_scan_timeout(struct work_struct *work) cw1200_scan_timeout() argument 338 container_of(work, struct cw1200_common, scan.timeout.work); cw1200_scan_timeout() 353 void cw1200_probe_work(struct work_struct *work) cw1200_probe_work() argument 356 container_of(work, struct cw1200_common, scan.probe_work.work); cw1200_probe_work() 383 wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe work.\n"); cw1200_probe_work()
|
H A D | pm.c | 126 static long cw1200_suspend_work(struct delayed_work *work) cw1200_suspend_work() argument 128 int ret = cancel_delayed_work(work); cw1200_suspend_work() 132 tmo = work->timer.expires - jiffies; cw1200_suspend_work() 142 struct delayed_work *work, cw1200_resume_work() 148 return queue_delayed_work(priv->workqueue, work, tmo); cw1200_resume_work() 231 /* Store delayed work states. */ cw1200_wow_suspend() 341 /* Resume delayed work */ cw1200_wow_resume() 141 cw1200_resume_work(struct cw1200_common *priv, struct delayed_work *work, unsigned long tmo) cw1200_resume_work() argument
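cw1200_suspend_work() above cancels a pending delayed work and computes how much of its timeout was left from work->timer.expires; cw1200_resume_work() re-queues with that remainder. A generic sketch of the same save/restore idea (my_* names hypothetical):

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>

/* cancel @dwork; return the jiffies it still had to wait, or -1 if idle */
static long my_suspend_dwork(struct delayed_work *dwork)
{
	if (!cancel_delayed_work(dwork))
		return -1;	/* not pending, nothing to restore */

	/* it was pending, so the timer had not fired and expires is valid */
	return max_t(long, 0, (long)(dwork->timer.expires - jiffies));
}

static void my_resume_dwork(struct delayed_work *dwork, long tmo)
{
	if (tmo >= 0)
		schedule_delayed_work(dwork, tmo);
}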
|
/linux-4.1.27/arch/arm/mach-realview/include/mach/ |
H A D | barriers.h | 3 * controller to work around hardware errata causing the outer_sync()
|
/linux-4.1.27/drivers/input/misc/ |
H A D | gpio-beeper.c | 22 struct work_struct work; member in struct:gpio_beeper 32 static void gpio_beeper_work(struct work_struct *work) gpio_beeper_work() argument 34 struct gpio_beeper *beep = container_of(work, struct gpio_beeper, work); gpio_beeper_work() 51 /* Schedule work to actually turn the beeper on or off */ gpio_beeper_event() 52 schedule_work(&beep->work); gpio_beeper_event() 61 cancel_work_sync(&beep->work); gpio_beeper_close() 83 INIT_WORK(&beep->work, gpio_beeper_work); gpio_beeper_probe()
|
H A D | da9052_onkey.c | 25 struct delayed_work work; member in struct:da9052_onkey 50 * is simulated through a work queue. da9052_onkey_query() 53 schedule_delayed_work(&onkey->work, da9052_onkey_query() 58 static void da9052_onkey_work(struct work_struct *work) da9052_onkey_work() argument 60 struct da9052_onkey *onkey = container_of(work, struct da9052_onkey, da9052_onkey_work() 61 work.work); da9052_onkey_work() 97 INIT_DELAYED_WORK(&onkey->work, da9052_onkey_work); da9052_onkey_probe() 126 cancel_delayed_work_sync(&onkey->work); da9052_onkey_probe() 139 cancel_delayed_work_sync(&onkey->work); da9052_onkey_remove()
|
H A D | da9055_onkey.c | 24 struct delayed_work work; member in struct:da9055_onkey 48 * Hence the deassertion of the pin is simulated through a work queue. da9055_onkey_query() 51 schedule_delayed_work(&onkey->work, msecs_to_jiffies(10)); da9055_onkey_query() 55 static void da9055_onkey_work(struct work_struct *work) da9055_onkey_work() argument 57 struct da9055_onkey *onkey = container_of(work, struct da9055_onkey, da9055_onkey_work() 58 work.work); da9055_onkey_work() 110 INIT_DELAYED_WORK(&onkey->work, da9055_onkey_work); da9055_onkey_probe() 135 cancel_delayed_work_sync(&onkey->work); da9055_onkey_probe() 149 cancel_delayed_work_sync(&onkey->work); da9055_onkey_remove()
|
H A D | wm831x-on.c | 32 struct delayed_work work; member in struct:wm831x_on 40 static void wm831x_poll_on(struct work_struct *work) wm831x_poll_on() argument 42 struct wm831x_on *wm831x_on = container_of(work, struct wm831x_on, wm831x_poll_on() 43 work.work); wm831x_poll_on() 59 schedule_delayed_work(&wm831x_on->work, 100); wm831x_poll_on() 66 schedule_delayed_work(&wm831x_on->work, 0); wm831x_on_irq() 86 INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on); wm831x_on_probe() 131 cancel_delayed_work_sync(&wm831x_on->work); wm831x_on_remove()
|
H A D | pm8xxx-vibrator.c | 36 * @work: work structure to set the vibration parameters 45 struct work_struct work; member in struct:pm8xxx_vib 78 * @work: pointer to work_struct 80 static void pm8xxx_work_handler(struct work_struct *work) pm8xxx_work_handler() argument 82 struct pm8xxx_vib *vib = container_of(work, struct pm8xxx_vib, work); pm8xxx_work_handler() 117 cancel_work_sync(&vib->work); pm8xxx_vib_close() 139 schedule_work(&vib->work); pm8xxx_vib_play_effect() 163 INIT_WORK(&vib->work, pm8xxx_work_handler); pm8xxx_vib_probe()
|
H A D | sirfsoc-onkey.c | 22 struct delayed_work work; member in struct:sirfsoc_pwrc_drvdata 39 static void sirfsoc_pwrc_report_event(struct work_struct *work) sirfsoc_pwrc_report_event() argument 42 container_of(work, struct sirfsoc_pwrc_drvdata, work.work); sirfsoc_pwrc_report_event() 45 schedule_delayed_work(&pwrcdrv->work, sirfsoc_pwrc_report_event() 65 schedule_delayed_work(&pwrcdrv->work, sirfsoc_pwrc_isr() 98 cancel_delayed_work_sync(&pwrcdrv->work); sirfsoc_pwrc_close() 141 INIT_DELAYED_WORK(&pwrcdrv->work, sirfsoc_pwrc_report_event); sirfsoc_pwrc_probe() 188 * Do not mask pwrc interrupt as we want pwrc to work as a wakeup source sirfsoc_pwrc_resume()
|
H A D | arizona-haptics.c | 28 struct work_struct work; member in struct:arizona_haptics 34 static void arizona_haptics_work(struct work_struct *work) arizona_haptics_work() argument 36 struct arizona_haptics *haptics = container_of(work, arizona_haptics_work() 38 work); arizona_haptics_work() 136 schedule_work(&haptics->work); arizona_haptics_play() 145 cancel_work_sync(&haptics->work); arizona_haptics_close() 171 INIT_WORK(&haptics->work, arizona_haptics_work); arizona_haptics_probe()
|
H A D | pwm-beeper.c | 28 struct work_struct work; member in struct:pwm_beeper 45 static void pwm_beeper_work(struct work_struct *work) pwm_beeper_work() argument 48 container_of(work, struct pwm_beeper, work); pwm_beeper_work() 76 schedule_work(&beeper->work); pwm_beeper_event() 83 cancel_work_sync(&beeper->work); pwm_beeper_stop() 118 INIT_WORK(&beeper->work, pwm_beeper_work); pwm_beeper_probe()
|
H A D | regulator-haptic.c | 28 struct work_struct work; member in struct:regulator_haptic 84 static void regulator_haptic_work(struct work_struct *work) regulator_haptic_work() argument 86 struct regulator_haptic *haptic = container_of(work, regulator_haptic_work() 87 struct regulator_haptic, work); regulator_haptic_work() 106 schedule_work(&haptic->work); regulator_haptic_play_effect() 115 cancel_work_sync(&haptic->work); regulator_haptic_close() 160 INIT_WORK(&haptic->work, regulator_haptic_work); regulator_haptic_probe()
|
H A D | max77693-haptic.c | 64 struct work_struct work; member in struct:max77693_haptic 175 static void max77693_haptic_play_work(struct work_struct *work) max77693_haptic_play_work() argument 178 container_of(work, struct max77693_haptic, work); max77693_haptic_play_work() 212 schedule_work(&haptic->work); max77693_haptic_play_effect() 237 cancel_work_sync(&haptic->work); max77693_haptic_close() 264 INIT_WORK(&haptic->work, max77693_haptic_play_work); max77693_haptic_probe()
|
H A D | max77843-haptic.c | 46 struct work_struct work; member in struct:max77843_haptic 156 static void max77843_haptic_play_work(struct work_struct *work) max77843_haptic_play_work() argument 159 container_of(work, struct max77843_haptic, work); max77843_haptic_play_work() 204 schedule_work(&haptic->work); max77843_haptic_play_effect() 233 cancel_work_sync(&haptic->work); max77843_haptic_close() 259 INIT_WORK(&haptic->work, max77843_haptic_play_work); max77843_haptic_probe()
|
H A D | max8997_haptic.c | 52 struct work_struct work; member in struct:max8997_haptic 220 static void max8997_haptic_play_effect_work(struct work_struct *work) max8997_haptic_play_effect_work() argument 223 container_of(work, struct max8997_haptic, work); max8997_haptic_play_effect_work() 240 schedule_work(&chip->work); max8997_haptic_play_effect() 249 cancel_work_sync(&chip->work); max8997_haptic_close() 279 INIT_WORK(&chip->work, max8997_haptic_play_effect_work); max8997_haptic_probe()
|
H A D | palmas-pwrbutton.c | 36 * @input_work: work for detecting release of key 58 * @work: work item to detect button release 60 static void palmas_power_button_work(struct work_struct *work) palmas_power_button_work() argument 62 struct palmas_pwron *pwron = container_of(work, palmas_power_button_work() 64 input_work.work); palmas_power_button_work() 267 * Cancel all pending work items for the power button, setup irq for wakeup
|
/linux-4.1.27/arch/m68k/hp300/ |
H A D | reboot.S | 6 * Do the dirty work of rebooting the machine. Basically we need to undo all the 12 /* XXX Doesn't work yet. Not sure why and can't be bothered to fix it at the moment. */
|
/linux-4.1.27/drivers/infiniband/core/ |
H A D | cm.c | 186 struct delayed_work work; member in struct:cm_work 197 struct cm_work work; /* Must be first. */ member in struct:cm_timewait_info 247 static void cm_work_handler(struct work_struct *work); 578 __be32 remote_id = timewait_info->work.remote_id; cm_insert_remote_id() 584 if (be32_lt(remote_id, cur_timewait_info->work.remote_id)) cm_insert_remote_id() 586 else if (be32_gt(remote_id, cur_timewait_info->work.remote_id)) cm_insert_remote_id() 610 if (be32_lt(remote_id, timewait_info->work.remote_id)) cm_find_remote_id() 612 else if (be32_gt(remote_id, timewait_info->work.remote_id)) cm_find_remote_id() 733 struct cm_work *work; cm_dequeue_work() local 738 work = list_entry(cm_id_priv->work_list.next, struct cm_work, list); cm_dequeue_work() 739 list_del(&work->list); cm_dequeue_work() 740 return work; cm_dequeue_work() 743 static void cm_free_work(struct cm_work *work) cm_free_work() argument 745 if (work->mad_recv_wc) cm_free_work() 746 ib_free_recv_mad(work->mad_recv_wc); cm_free_work() 747 kfree(work); cm_free_work() 796 timewait_info->work.local_id = local_id; cm_create_timewait_info() 797 INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); cm_create_timewait_info() 798 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; cm_create_timewait_info() 819 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work, cm_enter_timewait() 841 struct cm_work *work; cm_destroy_id() local 922 while ((work = cm_dequeue_work(cm_id_priv)) != NULL) cm_destroy_id() 923 cm_free_work(work); cm_destroy_id() 1286 static void cm_format_req_event(struct cm_work *work, cm_format_req_event() argument 1293 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; cm_format_req_event() 1294 param = &work->cm_event.param.req_rcvd; cm_format_req_event() 1297 param->primary_path = &work->path[0]; cm_format_req_event() 1299 param->alternate_path = &work->path[1]; cm_format_req_event() 1317 work->cm_event.private_data = &req_msg->private_data; cm_format_req_event() 1321 struct cm_work *work) cm_process_work() 1326 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); cm_process_work() 1327 cm_free_work(work); cm_process_work() 1331 work = cm_dequeue_work(cm_id_priv); cm_process_work() 1333 BUG_ON(!work); cm_process_work() 1335 &work->cm_event); cm_process_work() 1336 cm_free_work(work); cm_process_work() 1399 static void cm_dup_req_handler(struct cm_work *work, cm_dup_req_handler() argument 1405 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. cm_dup_req_handler() 1412 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); cm_dup_req_handler() 1442 static struct cm_id_private * cm_match_req(struct cm_work *work, cm_match_req() argument 1449 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; cm_match_req() 1455 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id, cm_match_req() 1456 timewait_info->work.remote_id); cm_match_req() 1459 cm_dup_req_handler(work, cur_cm_id_priv); cm_match_req() 1470 cm_issue_rej(work->port, work->mad_recv_wc, cm_match_req() 1483 cm_issue_rej(work->port, work->mad_recv_wc, cm_match_req() 1500 * in the work completion. 
1525 static int cm_req_handler(struct cm_work *work) cm_req_handler() argument 1532 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; cm_req_handler() 1534 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); cm_req_handler() 1540 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, cm_req_handler() 1541 work->mad_recv_wc->recv_buf.grh, cm_req_handler() 1549 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; cm_req_handler() 1553 listen_cm_id_priv = cm_match_req(work, cm_id_priv); cm_req_handler() 1565 cm_process_routed_req(req_msg, work->mad_recv_wc->wc); cm_req_handler() 1566 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); cm_req_handler() 1568 memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN); cm_req_handler() 1569 work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id; cm_req_handler() 1570 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); cm_req_handler() 1572 ib_get_cached_gid(work->port->cm_dev->ib_device, cm_req_handler() 1573 work->port->port_num, 0, &work->path[0].sgid); cm_req_handler() 1575 &work->path[0].sgid, sizeof work->path[0].sgid, cm_req_handler() 1580 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); cm_req_handler() 1583 &work->path[0].sgid, cm_req_handler() 1584 sizeof work->path[0].sgid, NULL, 0); cm_req_handler() 1602 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id); cm_req_handler() 1603 cm_process_work(cm_id_priv, work); cm_req_handler() 1758 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type) cm_format_rep_event() argument 1763 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; cm_format_rep_event() 1764 param = &work->cm_event.param.rep_rcvd; cm_format_rep_event() 1776 work->cm_event.private_data = &rep_msg->private_data; cm_format_rep_event() 1779 static void cm_dup_rep_handler(struct cm_work *work) cm_dup_rep_handler() argument 1786 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; cm_dup_rep_handler() 1792 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. cm_dup_rep_handler() 1794 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); cm_dup_rep_handler() 1822 static int cm_rep_handler(struct cm_work *work) cm_rep_handler() argument 1828 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; cm_rep_handler() 1831 cm_dup_rep_handler(work); cm_rep_handler() 1835 cm_format_rep_event(work, cm_id_priv->qp_type); cm_rep_handler() 1848 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id; cm_rep_handler() 1867 cm_issue_rej(work->port, work->mad_recv_wc, cm_rep_handler() 1895 list_add_tail(&work->list, &cm_id_priv->work_list); cm_rep_handler() 1899 cm_process_work(cm_id_priv, work); cm_rep_handler() 1909 static int cm_establish_handler(struct cm_work *work) cm_establish_handler() argument 1915 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id); cm_establish_handler() 1928 list_add_tail(&work->list, &cm_id_priv->work_list); cm_establish_handler() 1932 cm_process_work(cm_id_priv, work); cm_establish_handler() 1941 static int cm_rtu_handler(struct cm_work *work) cm_rtu_handler() argument 1947 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; cm_rtu_handler() 1953 work->cm_event.private_data = &rtu_msg->private_data; cm_rtu_handler() 1959 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
cm_rtu_handler() 1968 list_add_tail(&work->list, &cm_id_priv->work_list); cm_rtu_handler() 1972 cm_process_work(cm_id_priv, work); cm_rtu_handler() 2131 static int cm_dreq_handler(struct cm_work *work) cm_dreq_handler() argument 2138 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; cm_dreq_handler() 2142 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. cm_dreq_handler() 2144 cm_issue_drep(work->port, work->mad_recv_wc); cm_dreq_handler() 2148 work->cm_event.private_data = &dreq_msg->private_data; cm_dreq_handler() 2167 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. cm_dreq_handler() 2169 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) cm_dreq_handler() 2181 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. cm_dreq_handler() 2191 list_add_tail(&work->list, &cm_id_priv->work_list); cm_dreq_handler() 2195 cm_process_work(cm_id_priv, work); cm_dreq_handler() 2205 static int cm_drep_handler(struct cm_work *work) cm_drep_handler() argument 2211 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad; cm_drep_handler() 2217 work->cm_event.private_data = &drep_msg->private_data; cm_drep_handler() 2230 list_add_tail(&work->list, &cm_id_priv->work_list); cm_drep_handler() 2234 cm_process_work(cm_id_priv, work); cm_drep_handler() 2304 static void cm_format_rej_event(struct cm_work *work) cm_format_rej_event() argument 2309 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; cm_format_rej_event() 2310 param = &work->cm_event.param.rej_rcvd; cm_format_rej_event() 2314 work->cm_event.private_data = &rej_msg->private_data; cm_format_rej_event() 2334 (timewait_info->work.local_id ^ cm_acquire_rejected_id() 2351 static int cm_rej_handler(struct cm_work *work) cm_rej_handler() argument 2357 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad; cm_rej_handler() 2362 cm_format_rej_event(work); cm_rej_handler() 2404 list_add_tail(&work->list, &cm_id_priv->work_list); cm_rej_handler() 2408 cm_process_work(cm_id_priv, work); cm_rej_handler() 2509 static int cm_mra_handler(struct cm_work *work) cm_mra_handler() argument 2515 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad; cm_mra_handler() 2520 work->cm_event.private_data = &mra_msg->private_data; cm_mra_handler() 2521 work->cm_event.param.mra_rcvd.service_timeout = cm_mra_handler() 2548 atomic_long_inc(&work->port-> cm_mra_handler() 2557 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. cm_mra_handler() 2568 list_add_tail(&work->list, &cm_id_priv->work_list); cm_mra_handler() 2572 cm_process_work(cm_id_priv, work); cm_mra_handler() 2690 static int cm_lap_handler(struct cm_work *work) cm_lap_handler() argument 2699 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad; cm_lap_handler() 2705 param = &work->cm_event.param.lap_rcvd; cm_lap_handler() 2706 param->alternate_path = &work->path[0]; cm_lap_handler() 2708 work->cm_event.private_data = &lap_msg->private_data; cm_lap_handler() 2719 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. cm_lap_handler() 2721 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) cm_lap_handler() 2735 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
cm_lap_handler() 2744 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, cm_lap_handler() 2745 work->mad_recv_wc->recv_buf.grh, cm_lap_handler() 2750 list_add_tail(&work->list, &cm_id_priv->work_list); cm_lap_handler() 2754 cm_process_work(cm_id_priv, work); cm_lap_handler() 2830 static int cm_apr_handler(struct cm_work *work) cm_apr_handler() argument 2836 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad; cm_apr_handler() 2842 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status; cm_apr_handler() 2843 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info; cm_apr_handler() 2844 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length; cm_apr_handler() 2845 work->cm_event.private_data = &apr_msg->private_data; cm_apr_handler() 2860 list_add_tail(&work->list, &cm_id_priv->work_list); cm_apr_handler() 2864 cm_process_work(cm_id_priv, work); cm_apr_handler() 2873 static int cm_timewait_handler(struct cm_work *work) cm_timewait_handler() argument 2879 timewait_info = (struct cm_timewait_info *)work; cm_timewait_handler() 2884 cm_id_priv = cm_acquire_id(timewait_info->work.local_id, cm_timewait_handler() 2885 timewait_info->work.remote_id); cm_timewait_handler() 2898 list_add_tail(&work->list, &cm_id_priv->work_list); cm_timewait_handler() 2902 cm_process_work(cm_id_priv, work); cm_timewait_handler() 2975 static void cm_format_sidr_req_event(struct cm_work *work, cm_format_sidr_req_event() argument 2982 work->mad_recv_wc->recv_buf.mad; cm_format_sidr_req_event() 2983 param = &work->cm_event.param.sidr_req_rcvd; cm_format_sidr_req_event() 2986 param->port = work->port->port_num; cm_format_sidr_req_event() 2987 work->cm_event.private_data = &sidr_req_msg->private_data; cm_format_sidr_req_event() 2990 static int cm_sidr_req_handler(struct cm_work *work) cm_sidr_req_handler() argument 2997 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL); cm_sidr_req_handler() 3004 work->mad_recv_wc->recv_buf.mad; cm_sidr_req_handler() 3005 wc = work->mad_recv_wc->wc; cm_sidr_req_handler() 3008 cm_init_av_for_response(work->port, work->mad_recv_wc->wc, cm_sidr_req_handler() 3009 work->mad_recv_wc->recv_buf.grh, cm_sidr_req_handler() 3019 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. 
cm_sidr_req_handler() 3041 cm_format_sidr_req_event(work, &cur_cm_id_priv->id); cm_sidr_req_handler() 3042 cm_process_work(cm_id_priv, work); cm_sidr_req_handler() 3118 static void cm_format_sidr_rep_event(struct cm_work *work) cm_format_sidr_rep_event() argument 3124 work->mad_recv_wc->recv_buf.mad; cm_format_sidr_rep_event() 3125 param = &work->cm_event.param.sidr_rep_rcvd; cm_format_sidr_rep_event() 3131 work->cm_event.private_data = &sidr_rep_msg->private_data; cm_format_sidr_rep_event() 3134 static int cm_sidr_rep_handler(struct cm_work *work) cm_sidr_rep_handler() argument 3140 work->mad_recv_wc->recv_buf.mad; cm_sidr_rep_handler() 3154 cm_format_sidr_rep_event(work); cm_sidr_rep_handler() 3155 cm_process_work(cm_id_priv, work); cm_sidr_rep_handler() 3257 struct cm_work *work = container_of(_work, struct cm_work, work.work); cm_work_handler() local 3260 switch (work->cm_event.event) { cm_work_handler() 3262 ret = cm_req_handler(work); cm_work_handler() 3265 ret = cm_mra_handler(work); cm_work_handler() 3268 ret = cm_rej_handler(work); cm_work_handler() 3271 ret = cm_rep_handler(work); cm_work_handler() 3274 ret = cm_rtu_handler(work); cm_work_handler() 3277 ret = cm_establish_handler(work); cm_work_handler() 3280 ret = cm_dreq_handler(work); cm_work_handler() 3283 ret = cm_drep_handler(work); cm_work_handler() 3286 ret = cm_sidr_req_handler(work); cm_work_handler() 3289 ret = cm_sidr_rep_handler(work); cm_work_handler() 3292 ret = cm_lap_handler(work); cm_work_handler() 3295 ret = cm_apr_handler(work); cm_work_handler() 3298 ret = cm_timewait_handler(work); cm_work_handler() 3305 cm_free_work(work); cm_work_handler() 3311 struct cm_work *work; cm_establish() local 3315 work = kmalloc(sizeof *work, GFP_ATOMIC); cm_establish() 3316 if (!work) cm_establish() 3337 kfree(work); cm_establish() 3343 * can execute this work item. To prevent potential deadlock, cm_establish() 3347 INIT_DELAYED_WORK(&work->work, cm_work_handler); cm_establish() 3348 work->local_id = cm_id->local_id; cm_establish() 3349 work->remote_id = cm_id->remote_id; cm_establish() 3350 work->mad_recv_wc = NULL; cm_establish() 3351 work->cm_event.event = IB_CM_USER_ESTABLISHED; cm_establish() 3352 queue_delayed_work(cm.wq, &work->work, 0); cm_establish() 3399 struct cm_work *work; cm_recv_handler() local 3450 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, cm_recv_handler() 3452 if (!work) { cm_recv_handler() 3457 INIT_DELAYED_WORK(&work->work, cm_work_handler); cm_recv_handler() 3458 work->cm_event.event = event; cm_recv_handler() 3459 work->mad_recv_wc = mad_recv_wc; cm_recv_handler() 3460 work->port = port; cm_recv_handler() 3461 queue_delayed_work(cm.wq, &work->work, 0); cm_recv_handler() 3922 cancel_delayed_work(&timewait_info->work.work); ib_cm_cleanup() 1320 cm_process_work(struct cm_id_private *cm_id_priv, struct cm_work *work) cm_process_work() argument
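Nearly every handler above follows one shape: cm_recv_handler() kmalloc()s a cm_work, stamps the event, and queues its delayed work at zero delay on the private cm.wq; cm_work_handler() switch-dispatches on the event and then cm_free_work()s it, while the timewait variant is queued with a real timeout and cancelled in ib_cm_cleanup(). A stripped-down sketch of that one-shot, self-freeing shape (my_* names hypothetical):

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_event_work {
	struct delayed_work work;
	int event;
};

static struct workqueue_struct *my_wq;

static void my_event_handler(struct work_struct *_work)
{
	struct my_event_work *w =
		container_of(_work, struct my_event_work, work.work);

	switch (w->event) {
	/* ... one case per event, as in cm_work_handler() ... */
	default:
		break;
	}
	kfree(w);				/* one-shot: the handler frees it */
}

static int my_post_event(int event, unsigned long delay)
{
	struct my_event_work *w = kmalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return -ENOMEM;
	INIT_DELAYED_WORK(&w->work, my_event_handler);
	w->event = event;
	/* zero delay for received events, a real timeout for timewait */
	queue_delayed_work(my_wq, &w->work, delay);
	return 0;
}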
|
H A D | cache.c | 56 struct work_struct work; member in struct:ib_update_work 329 struct ib_update_work *work = ib_cache_task() local 330 container_of(_work, struct ib_update_work, work); ib_cache_task() 332 ib_cache_update(work->device, work->port_num); ib_cache_task() 333 kfree(work); ib_cache_task() 339 struct ib_update_work *work; ib_cache_event() local 348 work = kmalloc(sizeof *work, GFP_ATOMIC); ib_cache_event() 349 if (work) { ib_cache_event() 350 INIT_WORK(&work->work, ib_cache_task); ib_cache_event() 351 work->device = event->device; ib_cache_event() 352 work->port_num = event->element.port_num; ib_cache_event() 353 queue_work(ib_wq, &work->work); ib_cache_event()
|
H A D | cma.c | 163 struct work_struct work; member in struct:cma_work 171 struct work_struct work; member in struct:cma_ndev_work 177 struct work_struct work; member in struct:iboe_mcast_work 1695 struct cma_work *work = context; cma_query_handler() local 1698 route = &work->id->id.route; cma_query_handler() 1704 work->old_state = RDMA_CM_ROUTE_QUERY; cma_query_handler() 1705 work->new_state = RDMA_CM_ADDR_RESOLVED; cma_query_handler() 1706 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; cma_query_handler() 1707 work->event.status = status; cma_query_handler() 1710 queue_work(cma_wq, &work->work); cma_query_handler() 1714 struct cma_work *work) cma_query_ib_route() 1755 work, &id_priv->query); cma_query_ib_route() 1762 struct cma_work *work = container_of(_work, struct cma_work, work); cma_work_handler() local 1763 struct rdma_id_private *id_priv = work->id; cma_work_handler() 1767 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) cma_work_handler() 1770 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { cma_work_handler() 1779 kfree(work); cma_work_handler() 1784 struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work); cma_ndev_work_handler() local 1785 struct rdma_id_private *id_priv = work->id; cma_ndev_work_handler() 1793 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { cma_ndev_work_handler() 1803 kfree(work); cma_ndev_work_handler() 1809 struct cma_work *work; cma_resolve_ib_route() local 1812 work = kzalloc(sizeof *work, GFP_KERNEL); cma_resolve_ib_route() 1813 if (!work) cma_resolve_ib_route() 1816 work->id = id_priv; cma_resolve_ib_route() 1817 INIT_WORK(&work->work, cma_work_handler); cma_resolve_ib_route() 1818 work->old_state = RDMA_CM_ROUTE_QUERY; cma_resolve_ib_route() 1819 work->new_state = RDMA_CM_ROUTE_RESOLVED; cma_resolve_ib_route() 1820 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; cma_resolve_ib_route() 1828 ret = cma_query_ib_route(id_priv, timeout_ms, work); cma_resolve_ib_route() 1837 kfree(work); cma_resolve_ib_route() 1869 struct cma_work *work; cma_resolve_iw_route() local 1871 work = kzalloc(sizeof *work, GFP_KERNEL); cma_resolve_iw_route() 1872 if (!work) cma_resolve_iw_route() 1875 work->id = id_priv; cma_resolve_iw_route() 1876 INIT_WORK(&work->work, cma_work_handler); cma_resolve_iw_route() 1877 work->old_state = RDMA_CM_ROUTE_QUERY; cma_resolve_iw_route() 1878 work->new_state = RDMA_CM_ROUTE_RESOLVED; cma_resolve_iw_route() 1879 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; cma_resolve_iw_route() 1880 queue_work(cma_wq, &work->work); cma_resolve_iw_route() 1908 struct cma_work *work; cma_resolve_iboe_route() local 1913 work = kzalloc(sizeof *work, GFP_KERNEL); cma_resolve_iboe_route() 1914 if (!work) cma_resolve_iboe_route() 1917 work->id = id_priv; cma_resolve_iboe_route() 1918 INIT_WORK(&work->work, cma_work_handler); cma_resolve_iboe_route() 1960 work->old_state = RDMA_CM_ROUTE_QUERY; cma_resolve_iboe_route() 1961 work->new_state = RDMA_CM_ROUTE_RESOLVED; cma_resolve_iboe_route() 1962 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; cma_resolve_iboe_route() 1963 work->event.status = 0; cma_resolve_iboe_route() 1965 queue_work(cma_wq, &work->work); cma_resolve_iboe_route() 1973 kfree(work); cma_resolve_iboe_route() 2132 struct cma_work *work; cma_resolve_loopback() local 2136 work = kzalloc(sizeof *work, GFP_KERNEL); cma_resolve_loopback() 2137 if (!work) cma_resolve_loopback() 2149 work->id = id_priv; cma_resolve_loopback() 2150 INIT_WORK(&work->work, cma_work_handler); 
cma_resolve_loopback() 2151 work->old_state = RDMA_CM_ADDR_QUERY; cma_resolve_loopback() 2152 work->new_state = RDMA_CM_ADDR_RESOLVED; cma_resolve_loopback() 2153 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; cma_resolve_loopback() 2154 queue_work(cma_wq, &work->work); cma_resolve_loopback() 2157 kfree(work); cma_resolve_loopback() 2163 struct cma_work *work; cma_resolve_ib_addr() local 2166 work = kzalloc(sizeof *work, GFP_KERNEL); cma_resolve_ib_addr() 2167 if (!work) cma_resolve_ib_addr() 2179 work->id = id_priv; cma_resolve_ib_addr() 2180 INIT_WORK(&work->work, cma_work_handler); cma_resolve_ib_addr() 2181 work->old_state = RDMA_CM_ADDR_QUERY; cma_resolve_ib_addr() 2182 work->new_state = RDMA_CM_ADDR_RESOLVED; cma_resolve_ib_addr() 2183 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; cma_resolve_ib_addr() 2184 queue_work(cma_wq, &work->work); cma_resolve_ib_addr() 2187 kfree(work); cma_resolve_ib_addr() 3259 static void iboe_mcast_work_handler(struct work_struct *work) iboe_mcast_work_handler() argument 3261 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); iboe_mcast_work_handler() 3300 struct iboe_mcast_work *work; cma_iboe_join_multicast() local 3309 work = kzalloc(sizeof *work, GFP_KERNEL); cma_iboe_join_multicast() 3310 if (!work) cma_iboe_join_multicast() 3341 work->id = id_priv; cma_iboe_join_multicast() 3342 work->mc = mc; cma_iboe_join_multicast() 3343 INIT_WORK(&work->work, iboe_mcast_work_handler); cma_iboe_join_multicast() 3345 queue_work(cma_wq, &work->work); cma_iboe_join_multicast() 3352 kfree(work); cma_iboe_join_multicast() 3448 struct cma_ndev_work *work; cma_netdev_change() local 3456 work = kzalloc(sizeof *work, GFP_KERNEL); cma_netdev_change() 3457 if (!work) cma_netdev_change() 3460 INIT_WORK(&work->work, cma_ndev_work_handler); cma_netdev_change() 3461 work->id = id_priv; cma_netdev_change() 3462 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; cma_netdev_change() 3464 queue_work(cma_wq, &work->work); cma_netdev_change() 1713 cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, struct cma_work *work) cma_query_ib_route() argument
|
H A D | iwcm.c | 62 struct work_struct work; member in struct:iwcm_work 102 * If work elements cannot be allocated for the new connect request cm_id, 109 struct iwcm_work *work; get_work() local 113 work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work, get_work() 115 list_del_init(&work->free_list); get_work() 116 return work; get_work() 119 static void put_work(struct iwcm_work *work) put_work() argument 121 list_add(&work->free_list, &work->cm_id->work_free_list); put_work() 134 struct iwcm_work *work; alloc_work_entries() local 138 work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL); alloc_work_entries() 139 if (!work) { alloc_work_entries() 143 work->cm_id = cm_id_priv; alloc_work_entries() 144 INIT_LIST_HEAD(&work->list); alloc_work_entries() 145 put_work(work); alloc_work_entries() 629 * The work item contains a pointer to the listen_cm_id and the event. The 872 struct iwcm_work *work = container_of(_work, struct iwcm_work, work); cm_work_handler() local 874 struct iwcm_id_private *cm_id_priv = work->cm_id; cm_work_handler() 883 work = list_entry(cm_id_priv->work_list.next, cm_work_handler() 885 list_del_init(&work->list); cm_work_handler() 887 levent = work->event; cm_work_handler() 888 put_work(work); cm_work_handler() 916 * work_list. If this is the first event on the work_list, the work 930 struct iwcm_work *work; cm_event_handler() local 938 work = get_work(cm_id_priv); cm_event_handler() 939 if (!work) { cm_event_handler() 944 INIT_WORK(&work->work, cm_work_handler); cm_event_handler() 945 work->cm_id = cm_id_priv; cm_event_handler() 946 work->event = *iw_event; cm_event_handler() 948 if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST || cm_event_handler() 949 work->event.event == IW_CM_EVENT_CONNECT_REPLY) && cm_event_handler() 950 work->event.private_data_len) { cm_event_handler() 951 ret = copy_private_data(&work->event); cm_event_handler() 953 put_work(work); cm_event_handler() 960 list_add_tail(&work->list, &cm_id_priv->work_list); cm_event_handler() 961 queue_work(iwcm_wq, &work->work); cm_event_handler() 963 list_add_tail(&work->list, &cm_id_priv->work_list); cm_event_handler()
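iwcm.c cannot sleep or allocate in cm_event_handler(), so alloc_work_entries() pre-allocates a per-cm_id free list up front and get_work()/put_work() recycle elements from it at event time. A generic sketch of that reservation pattern (my_* names hypothetical; the caller is expected to hold its own lock around get/put):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_work {
	struct list_head free_list;
	/* ... copied event payload ... */
};

struct my_ctx {
	struct list_head work_free_list;
};

/* pre-allocate while sleeping is still allowed (e.g. at create time) */
static int my_alloc_work_entries(struct my_ctx *ctx, int count)
{
	struct my_work *w;
	int i;

	for (i = 0; i < count; i++) {
		w = kmalloc(sizeof(*w), GFP_KERNEL);
		if (!w)
			return -ENOMEM;
		list_add(&w->free_list, &ctx->work_free_list);
	}
	return 0;
}

/* called under the ctx lock from the atomic event handler */
static struct my_work *my_get_work(struct my_ctx *ctx)
{
	struct my_work *w;

	if (list_empty(&ctx->work_free_list))
		return NULL;		/* reservation exhausted: drop the event */
	w = list_first_entry(&ctx->work_free_list, struct my_work, free_list);
	list_del_init(&w->free_list);
	return w;
}

static void my_put_work(struct my_ctx *ctx, struct my_work *w)
{
	list_add(&w->free_list, &ctx->work_free_list);
}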
|
/linux-4.1.27/drivers/leds/ |
H A D | leds-dac124s085.c | 28 struct work_struct work; member in struct:dac124s085_led 41 static void dac124s085_led_work(struct work_struct *work) dac124s085_led_work() argument 43 struct dac124s085_led *led = container_of(work, struct dac124s085_led, dac124s085_led_work() 44 work); dac124s085_led_work() 62 schedule_work(&led->work); dac124s085_set_brightness() 85 INIT_WORK(&led->work, dac124s085_led_work); dac124s085_probe() 114 cancel_work_sync(&dac->leds[i].work); dac124s085_remove()
|
H A D | leds-da903x.c | 36 struct work_struct work; member in struct:da903x_led 46 static void da903x_led_work(struct work_struct *work) da903x_led_work() argument 48 struct da903x_led *led = container_of(work, struct da903x_led, work); da903x_led_work() 90 schedule_work(&led->work); da903x_led_set() 124 INIT_WORK(&led->work, da903x_led_work); da903x_led_probe()
|
H A D | leds-adp5520.c | 26 struct work_struct work; member in struct:adp5520_led 33 static void adp5520_led_work(struct work_struct *work) adp5520_led_work() argument 35 struct adp5520_led *led = container_of(work, struct adp5520_led, work); adp5520_led_work() 47 schedule_work(&led->work); adp5520_led_set() 151 INIT_WORK(&led_dat->work, adp5520_led_work); adp5520_led_probe() 175 cancel_work_sync(&led[i].work); adp5520_led_probe() 195 cancel_work_sync(&led[i].work); adp5520_led_remove()
|
H A D | leds-da9052.c | 35 struct work_struct work; member in struct:da9052_led 61 static void da9052_led_work(struct work_struct *work) da9052_led_work() argument 63 struct da9052_led *led = container_of(work, struct da9052_led, work); da9052_led_work() 75 schedule_work(&led->work); da9052_led_set() 142 INIT_WORK(&led[i].work, da9052_led_work); da9052_led_probe() 171 cancel_work_sync(&led[i].work); da9052_led_probe() 193 cancel_work_sync(&led[i].work); da9052_led_remove()
|
H A D | leds-regulator.c | 31 struct work_struct work; member in struct:regulator_led 126 static void led_work(struct work_struct *work) led_work() argument 130 led = container_of(work, struct regulator_led, work); led_work() 140 schedule_work(&led->work); regulator_led_brightness_set() 184 INIT_WORK(&led->work, led_work); regulator_led_probe() 190 cancel_work_sync(&led->work); regulator_led_probe() 208 cancel_work_sync(&led->work); regulator_led_remove()
|
H A D | leds-pca9532.c | 44 struct work_struct work; member in struct:pca9532_data 177 schedule_work(&led->work); pca9532_set_brightness() 201 schedule_work(&led->work); pca9532_set_blink() 219 schedule_work(&data->work); pca9532_event() 224 static void pca9532_input_work(struct work_struct *work) pca9532_input_work() argument 227 container_of(work, struct pca9532_data, work); pca9532_input_work() 236 static void pca9532_led_work(struct work_struct *work) pca9532_led_work() argument 239 led = container_of(work, struct pca9532_led, work); pca9532_led_work() 310 cancel_work_sync(&data->leds[i].work); pca9532_destroy_devices() 314 cancel_work_sync(&data->work); pca9532_destroy_devices() 364 INIT_WORK(&led->work, pca9532_led_work); pca9532_configure() 394 INIT_WORK(&data->work, pca9532_input_work); pca9532_configure() 397 cancel_work_sync(&data->work); pca9532_configure()
|
H A D | leds-lp8788.c | 29 struct work_struct work; member in struct:lp8788_led 91 static void lp8788_led_work(struct work_struct *work) lp8788_led_work() argument 93 struct lp8788_led *led = container_of(work, struct lp8788_led, work); lp8788_led_work() 125 schedule_work(&led->work); lp8788_brightness_set() 152 INIT_WORK(&led->work, lp8788_led_work); lp8788_led_probe() 176 flush_work(&led->work); lp8788_led_remove()
|
H A D | leds-lt3593.c | 31 struct work_struct work; member in struct:lt3593_led_data 35 static void lt3593_led_work(struct work_struct *work) lt3593_led_work() argument 39 container_of(work, struct lt3593_led_data, work); lt3593_led_work() 81 schedule_work(&led_dat->work); lt3593_led_set() 114 INIT_WORK(&led_dat->work, lt3593_led_work); create_lt3593_led() 132 cancel_work_sync(&led->work); delete_lt3593_led()
|
H A D | leds-pwm.c | 30 struct work_struct work; member in struct:led_pwm_data 54 static void led_pwm_work(struct work_struct *work) led_pwm_work() argument 57 container_of(work, struct led_pwm_data, work); led_pwm_work() 79 schedule_work(&led_dat->work); led_pwm_set() 95 cancel_work_sync(&priv->leds[priv->num_leds].work); led_pwm_cleanup() 126 INIT_WORK(&led_data->work, led_pwm_work); led_pwm_add()
|
H A D | leds-wm8350.c | 141 static void led_work(struct work_struct *work) led_work() argument 143 struct wm8350_led *led = container_of(work, struct wm8350_led, work); led_work() 187 schedule_work(&led->work); wm8350_led_set() 255 INIT_WORK(&led->work, led_work); wm8350_led_probe() 267 flush_work(&led->work); wm8350_led_remove()
|
H A D | leds-mc13783.c | 35 struct work_struct work; member in struct:mc13xxx_led 58 static void mc13xxx_led_work(struct work_struct *work) mc13xxx_led_work() argument 60 struct mc13xxx_led *led = container_of(work, struct mc13xxx_led, work); mc13xxx_led_work() 120 schedule_work(&led->work); mc13xxx_led_set() 263 INIT_WORK(&leds->led[i].work, mc13xxx_led_work); mc13xxx_led_probe() 275 cancel_work_sync(&leds->led[i].work); mc13xxx_led_probe() 288 cancel_work_sync(&leds->led[i].work); mc13xxx_led_remove()
|
H A D | leds-wm831x-status.c | 26 struct work_struct work; member in struct:wm831x_status 43 static void wm831x_status_work(struct work_struct *work) wm831x_status_work() argument 45 struct wm831x_status *led = container_of(work, struct wm831x_status, wm831x_status_work() 46 work); wm831x_status_work() 83 schedule_work(&led->work); wm831x_status_set() 152 schedule_work(&led->work); wm831x_status_blink_set() 211 schedule_work(&led->work); wm831x_status_src_store() 265 INIT_WORK(&drvdata->work, wm831x_status_work); wm831x_status_probe()
|
H A D | leds-gpio.c | 27 struct work_struct work; member in struct:gpio_led_data 35 static void gpio_led_work(struct work_struct *work) gpio_led_work() argument 38 container_of(work, struct gpio_led_data, work); gpio_led_work() 66 schedule_work(&led_dat->work); gpio_led_set() 145 INIT_WORK(&led_dat->work, gpio_led_work); create_gpio_led() 153 cancel_work_sync(&led->work); delete_gpio_led()
|
/linux-4.1.27/arch/sparc/kernel/ |
H A D | sun4d_smp.c | 195 struct sun4d_ipi_work *work; smp4d_ipi_init() local 200 work = &per_cpu(sun4d_ipi_work, cpu); for_each_possible_cpu() 201 work->single = work->msk = work->resched = 0; for_each_possible_cpu() 207 struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work); sun4d_ipi_interrupt() local 209 if (work->single) { sun4d_ipi_interrupt() 210 work->single = 0; sun4d_ipi_interrupt() 213 if (work->msk) { sun4d_ipi_interrupt() 214 work->msk = 0; sun4d_ipi_interrupt() 217 if (work->resched) { sun4d_ipi_interrupt() 218 work->resched = 0; sun4d_ipi_interrupt() 238 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); sun4d_ipi_single() local 240 /* Mark work */ sun4d_ipi_single() 241 work->single = 1; sun4d_ipi_single() 249 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); sun4d_ipi_mask_one() local 251 /* Mark work */ sun4d_ipi_mask_one() 252 work->msk = 1; sun4d_ipi_mask_one() 260 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); sun4d_ipi_resched() local 262 /* Mark work */ sun4d_ipi_resched() 263 work->resched = 1; sun4d_ipi_resched()
|
H A D | leon_smp.c | 139 prom_printf("######## !!!! The irqmp-ctrl must have broadcast enabled, smp won't work !!!!! ####### nr cpus: %d\n", leon_smp_setbroadcast() 276 struct leon_ipi_work *work; leon_ipi_init() local 299 work = &per_cpu(leon_ipi_work, cpu); for_each_possible_cpu() 300 work->single = work->msk = work->resched = 0; for_each_possible_cpu() 313 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); leon_ipi_single() local 315 /* Mark work */ leon_ipi_single() 316 work->single = 1; leon_ipi_single() 324 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); leon_ipi_mask_one() local 326 /* Mark work */ leon_ipi_mask_one() 327 work->msk = 1; leon_ipi_mask_one() 335 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); leon_ipi_resched() local 337 /* Mark work */ leon_ipi_resched() 338 work->resched = 1; leon_ipi_resched() 346 struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work); leonsmp_ipi_interrupt() local 348 if (work->single) { leonsmp_ipi_interrupt() 349 work->single = 0; leonsmp_ipi_interrupt() 352 if (work->msk) { leonsmp_ipi_interrupt() 353 work->msk = 0; leonsmp_ipi_interrupt() 356 if (work->resched) { leonsmp_ipi_interrupt() 357 work->resched = 0; leonsmp_ipi_interrupt()
|
/linux-4.1.27/drivers/input/touchscreen/ |
H A D | pcap_ts.c | 27 struct delayed_work work; member in struct:pcap_ts 52 schedule_delayed_work(&pcap_ts->work, 0); pcap_ts_read_xy() 64 schedule_delayed_work(&pcap_ts->work, 0); pcap_ts_read_xy() 75 schedule_delayed_work(&pcap_ts->work, pcap_ts_read_xy() 88 static void pcap_ts_work(struct work_struct *work) pcap_ts_work() argument 90 struct delayed_work *dw = container_of(work, struct delayed_work, work); pcap_ts_work() 91 struct pcap_ts *pcap_ts = container_of(dw, struct pcap_ts, work); pcap_ts_work() 113 schedule_delayed_work(&pcap_ts->work, 0); pcap_ts_event_touch() 123 schedule_delayed_work(&pcap_ts->work, 0); pcap_ts_open() 132 cancel_delayed_work_sync(&pcap_ts->work); pcap_ts_close() 156 INIT_DELAYED_WORK(&pcap_ts->work, pcap_ts_work); pcap_ts_probe() 209 cancel_delayed_work_sync(&pcap_ts->work); pcap_ts_remove()
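pcap_ts.c shows the usual touchscreen shape: the interrupt schedules a delayed work with zero delay, and the work re-arms itself at the sample period until the pen goes up. A minimal generic sketch of that self-rearming poll loop (my_* names and the 50 Hz period hypothetical):

#include <linux/workqueue.h>
#include <linux/interrupt.h>

struct my_ts {
	struct delayed_work work;
	bool pen_down;
};

static void my_ts_poll(struct work_struct *work)
{
	struct my_ts *ts = container_of(to_delayed_work(work),
					struct my_ts, work);

	/* ... read one sample and report it ... */

	if (ts->pen_down)			/* re-arm: ~50 Hz while touched */
		schedule_delayed_work(&ts->work, HZ / 50);
}

static irqreturn_t my_ts_irq(int irq, void *data)
{
	struct my_ts *ts = data;

	ts->pen_down = true;
	schedule_delayed_work(&ts->work, 0);	/* first sample immediately */
	return IRQ_HANDLED;
}

static void my_ts_close(struct my_ts *ts)
{
	ts->pen_down = false;			/* stop the re-arm loop ... */
	cancel_delayed_work_sync(&ts->work);	/* ... then wait it out */
}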
|
H A D | hp680_ts_input.c | 20 static void do_softint(struct work_struct *work); 23 static DECLARE_DELAYED_WORK(work, do_softint); 25 static void do_softint(struct work_struct *work) do_softint() argument 71 schedule_delayed_work(&work, HZ / 20); hp680_ts_interrupt() 110 cancel_delayed_work_sync(&work); hp680_ts_init() 118 cancel_delayed_work_sync(&work); hp680_ts_exit()
|
H A D | mc13783_ts.c | 39 struct delayed_work work; member in struct:mc13783_ts_priv 52 * Kick off reading coordinates. Note that if work happens already mc13783_ts_handler() 57 queue_delayed_work(priv->workq, &priv->work, 0); mc13783_ts_handler() 109 queue_delayed_work(priv->workq, &priv->work, HZ / 50); mc13783_ts_report_sample() 121 static void mc13783_ts_work(struct work_struct *work) mc13783_ts_work() argument 124 container_of(work, struct mc13783_ts_priv, work.work); mc13783_ts_work() 168 cancel_delayed_work_sync(&priv->work); mc13783_ts_close() 182 INIT_DELAYED_WORK(&priv->work, mc13783_ts_work); mc13783_ts_probe()
|
H A D | eeti_ts.c | 49 struct work_struct work; member in struct:eeti_ts_priv 68 static void eeti_ts_read(struct work_struct *work) eeti_ts_read() argument 73 container_of(work, struct eeti_ts_priv, work); eeti_ts_read() 122 schedule_work(&priv->work); eeti_ts_isr() 132 eeti_ts_read(&priv->work); eeti_ts_start() 138 cancel_work_sync(&priv->work); eeti_ts_stop() 214 INIT_WORK(&priv->work, eeti_ts_read); eeti_ts_probe()
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
H A D | ipath_user_pages.c | 187 struct work_struct work; member in struct:ipath_user_pages_work 194 struct ipath_user_pages_work *work = user_pages_account() local 195 container_of(_work, struct ipath_user_pages_work, work); user_pages_account() 197 down_write(&work->mm->mmap_sem); user_pages_account() 198 work->mm->pinned_vm -= work->num_pages; user_pages_account() 199 up_write(&work->mm->mmap_sem); user_pages_account() 200 mmput(work->mm); user_pages_account() 201 kfree(work); user_pages_account() 206 struct ipath_user_pages_work *work; ipath_release_user_pages_on_close() local 215 work = kmalloc(sizeof(*work), GFP_KERNEL); ipath_release_user_pages_on_close() 216 if (!work) ipath_release_user_pages_on_close() 219 INIT_WORK(&work->work, user_pages_account); ipath_release_user_pages_on_close() 220 work->mm = mm; ipath_release_user_pages_on_close() 221 work->num_pages = num_pages; ipath_release_user_pages_on_close() 223 queue_work(ib_wq, &work->work); ipath_release_user_pages_on_close()
|
/linux-4.1.27/drivers/scsi/libsas/ |
H A D | sas_phy.c | 33 static void sas_phye_loss_of_signal(struct work_struct *work) sas_phye_loss_of_signal() argument 35 struct asd_sas_event *ev = to_asd_sas_event(work); sas_phye_loss_of_signal() 43 static void sas_phye_oob_done(struct work_struct *work) sas_phye_oob_done() argument 45 struct asd_sas_event *ev = to_asd_sas_event(work); sas_phye_oob_done() 52 static void sas_phye_oob_error(struct work_struct *work) sas_phye_oob_error() argument 54 struct asd_sas_event *ev = to_asd_sas_event(work); sas_phye_oob_error() 83 static void sas_phye_spinup_hold(struct work_struct *work) sas_phye_spinup_hold() argument 85 struct asd_sas_event *ev = to_asd_sas_event(work); sas_phye_spinup_hold() 97 static void sas_phye_resume_timeout(struct work_struct *work) sas_phye_resume_timeout() argument 99 struct asd_sas_event *ev = to_asd_sas_event(work); sas_phye_resume_timeout() 147 INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]); sas_register_phys() 152 INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]); sas_register_phys()
|
H A D | sas_event.c | 40 scsi_queue_work(ha->core.shost, &sw->work); sas_queue_work() 44 struct sas_work *work, sas_queue_event() 51 sas_queue_work(ha, work); sas_queue_event() 114 sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha); sas_enable_revalidation() 124 &sas_ha->ha_events[event].work, sas_ha); notify_ha_event() 134 &phy->port_events[event].work, ha); notify_port_event() 144 &phy->phy_events[event].work, ha); sas_notify_phy_event() 156 INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); sas_init_events() 43 sas_queue_event(int event, unsigned long *pending, struct sas_work *work, struct sas_ha_struct *ha) sas_queue_event() argument
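sas_queue_event() above takes a pending bitmask alongside the static work array set up in sas_init_events(): each event type is queued at most once until its handler runs, so bursts of identical events coalesce into a single execution. A minimal sketch of that bitmask-gated dispatch (my_* names hypothetical):

#include <linux/workqueue.h>
#include <linux/bitops.h>

enum { MY_EV_LINK, MY_EV_ERROR, MY_NUM_EVENTS };

struct my_port {
	unsigned long pending;			/* one bit per event type */
	struct work_struct ev_work[MY_NUM_EVENTS];
};

static void my_link_workfn(struct work_struct *work)
{
	struct my_port *port = container_of(work, struct my_port,
					    ev_work[MY_EV_LINK]);

	clear_bit(MY_EV_LINK, &port->pending);	/* re-arm before handling */
	/* ... process the (possibly coalesced) link event ... */
}

/* callable from irq context; duplicate notifications collapse into one run */
static void my_notify_event(struct my_port *port, int ev)
{
	if (!test_and_set_bit(ev, &port->pending))
		schedule_work(&port->ev_work[ev]);
}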
|
H A D | sas_port.c | 259 void sas_porte_bytes_dmaed(struct work_struct *work) sas_porte_bytes_dmaed() argument 261 struct asd_sas_event *ev = to_asd_sas_event(work); sas_porte_bytes_dmaed() 269 void sas_porte_broadcast_rcvd(struct work_struct *work) sas_porte_broadcast_rcvd() argument 271 struct asd_sas_event *ev = to_asd_sas_event(work); sas_porte_broadcast_rcvd() 286 void sas_porte_link_reset_err(struct work_struct *work) sas_porte_link_reset_err() argument 288 struct asd_sas_event *ev = to_asd_sas_event(work); sas_porte_link_reset_err() 296 void sas_porte_timer_event(struct work_struct *work) sas_porte_timer_event() argument 298 struct asd_sas_event *ev = to_asd_sas_event(work); sas_porte_timer_event() 306 void sas_porte_hard_reset(struct work_struct *work) sas_porte_hard_reset() argument 308 struct asd_sas_event *ev = to_asd_sas_event(work); sas_porte_hard_reset()
|
H A D | sas_discover.c | 215 static void sas_probe_devices(struct work_struct *work) sas_probe_devices() argument 218 struct sas_discovery_event *ev = to_sas_discovery_event(work); sas_probe_devices() 243 static void sas_suspend_devices(struct work_struct *work) sas_suspend_devices() argument 247 struct sas_discovery_event *ev = to_sas_discovery_event(work); sas_suspend_devices() 274 static void sas_resume_devices(struct work_struct *work) sas_resume_devices() argument 276 struct sas_discovery_event *ev = to_sas_discovery_event(work); sas_resume_devices() 356 static void sas_destruct_devices(struct work_struct *work) sas_destruct_devices() argument 359 struct sas_discovery_event *ev = to_sas_discovery_event(work); sas_destruct_devices() 439 static void sas_discover_domain(struct work_struct *work) sas_discover_domain() argument 443 struct sas_discovery_event *ev = to_sas_discovery_event(work); sas_discover_domain() 497 static void sas_revalidate_domain(struct work_struct *work) sas_revalidate_domain() argument 500 struct sas_discovery_event *ev = to_sas_discovery_event(work); sas_revalidate_domain() 532 /* chained work is not subject to SA_HA_DRAINING or sas_chain_work() 537 scsi_queue_work(ha->core.shost, &sw->work); sas_chain_work() 563 sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha); sas_discover_event() 589 INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]); sas_init_disc()
|
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvif/ |
H A D | notify.h | 23 struct work_struct work; member in struct:nvif_notify 27 int (*func)(struct nvif_notify *), bool work, u8 type, 35 bool work, u8 type, void *data, u32 size, u32 reply,
|
/linux-4.1.27/sound/aoa/core/ |
H A D | gpio-pmf.c | 73 static void pmf_handle_notify(struct work_struct *work) pmf_handle_notify() argument 76 container_of(work, struct gpio_notification, work.work); pmf_handle_notify() 88 INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify); pmf_gpio_init() 89 INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify); pmf_gpio_init() 90 INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify); pmf_gpio_init() 108 /* make sure no work is pending before freeing pmf_gpio_exit() 110 cancel_delayed_work_sync(&rt->headphone_notify.work); pmf_gpio_exit() 111 cancel_delayed_work_sync(&rt->line_in_notify.work); pmf_gpio_exit() 112 cancel_delayed_work_sync(&rt->line_out_notify.work); pmf_gpio_exit() 127 schedule_delayed_work(¬if->work, 0); pmf_handle_notify_irq()
|
H A D | gpio-feature.c | 208 static void ftr_handle_notify(struct work_struct *work) ftr_handle_notify() argument 211 container_of(work, struct gpio_notification, work.work); ftr_handle_notify() 273 INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify); ftr_gpio_init() 274 INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify); ftr_gpio_init() 275 INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify); ftr_gpio_init() 291 cancel_delayed_work_sync(&rt->headphone_notify.work); ftr_gpio_exit() 292 cancel_delayed_work_sync(&rt->line_in_notify.work); ftr_gpio_exit() 293 cancel_delayed_work_sync(&rt->line_out_notify.work); ftr_gpio_exit() 303 schedule_delayed_work(¬if->work, 0); ftr_handle_notify_irq()
|
/linux-4.1.27/fs/ |
H A D | fs-writeback.c | 52 struct list_head list; /* pending work list */ 121 struct wb_writeback_work *work) bdi_queue_work() 123 trace_writeback_queue(bdi, work); bdi_queue_work() 127 if (work->done) bdi_queue_work() 128 complete(work->done); bdi_queue_work() 131 list_add_tail(&work->list, &bdi->work_list); bdi_queue_work() 141 struct wb_writeback_work *work; __bdi_start_writeback() local 147 work = kzalloc(sizeof(*work), GFP_ATOMIC); __bdi_start_writeback() 148 if (!work) { __bdi_start_writeback() 154 work->sync_mode = WB_SYNC_NONE; __bdi_start_writeback() 155 work->nr_pages = nr_pages; __bdi_start_writeback() 156 work->range_cyclic = range_cyclic; __bdi_start_writeback() 157 work->reason = reason; __bdi_start_writeback() 159 bdi_queue_work(bdi, work); __bdi_start_writeback() 166 * @reason: reason why some writeback work was initiated 194 * writeback as soon as there is no other work to do. bdi_start_background_writeback() 271 * Move expired (dirtied before work->older_than_this) dirty inodes from 277 struct wb_writeback_work *work) move_expired_inodes() 289 older_than_this = work->older_than_this; move_expired_inodes() 290 else if (!work->for_sync) { move_expired_inodes() 340 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) queue_io() argument 346 moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); queue_io() 348 EXPIRE_DIRTY_ATIME, work); queue_io() 349 trace_writeback_queue_io(wb, work, moved); queue_io() 628 struct wb_writeback_work *work) writeback_chunk_size() 645 if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages) writeback_chunk_size() 650 pages = min(pages, work->nr_pages); writeback_chunk_size() 665 struct wb_writeback_work *work) writeback_sb_inodes() 668 .sync_mode = work->sync_mode, writeback_sb_inodes() 669 .tagged_writepages = work->tagged_writepages, writeback_sb_inodes() 670 .for_kupdate = work->for_kupdate, writeback_sb_inodes() 671 .for_background = work->for_background, writeback_sb_inodes() 672 .for_sync = work->for_sync, writeback_sb_inodes() 673 .range_cyclic = work->range_cyclic, writeback_sb_inodes() 685 if (work->sb) { writeback_sb_inodes() 746 write_chunk = writeback_chunk_size(wb->bdi, work); writeback_sb_inodes() 756 work->nr_pages -= write_chunk - wbc.nr_to_write; writeback_sb_inodes() 773 if (work->nr_pages <= 0) writeback_sb_inodes() 781 struct wb_writeback_work *work) __writeback_inodes_wb() 799 wrote += writeback_sb_inodes(sb, wb, work); __writeback_inodes_wb() 806 if (work->nr_pages <= 0) __writeback_inodes_wb() 817 struct wb_writeback_work work = { writeback_inodes_wb() local 826 queue_io(wb, &work); writeback_inodes_wb() 827 __writeback_inodes_wb(wb, &work); writeback_inodes_wb() 830 return nr_pages - work.nr_pages; writeback_inodes_wb() 876 struct wb_writeback_work *work) wb_writeback() 879 long nr_pages = work->nr_pages; wb_writeback() 885 work->older_than_this = &oldest_jif; wb_writeback() 892 if (work->nr_pages <= 0) wb_writeback() 897 * run forever. Stop them if there is other work to do wb_writeback() 901 if ((work->for_background || work->for_kupdate) && wb_writeback() 909 if (work->for_background && !over_bground_thresh(wb->bdi)) wb_writeback() 915 * handled by these works yielding to any other work so we are wb_writeback() 918 if (work->for_kupdate) { wb_writeback() 921 } else if (work->for_background) wb_writeback() 924 trace_writeback_start(wb->bdi, work); wb_writeback() 926 queue_io(wb, work); wb_writeback() 927 if (work->sb) wb_writeback() 928 progress = writeback_sb_inodes(work->sb, wb, work); wb_writeback() 930 progress = __writeback_inodes_wb(wb, work); wb_writeback() 931 trace_writeback_written(wb->bdi, work); wb_writeback() 940 * mean the overall work is done. So we keep looping as long wb_writeback() 956 trace_writeback_wait(wb->bdi, work); wb_writeback() 967 return nr_pages - work->nr_pages; wb_writeback() 976 struct wb_writeback_work *work = NULL; get_next_work_item() local 980 work = list_entry(bdi->work_list.next, get_next_work_item() 982 list_del_init(&work->list); get_next_work_item() 985 return work; get_next_work_item() 1003 struct wb_writeback_work work = { wb_check_background_flush() local 1011 return wb_writeback(wb, &work); wb_check_background_flush() 1037 struct wb_writeback_work work = { wb_check_old_data_flush() local 1045 return wb_writeback(wb, &work); wb_check_old_data_flush() 1052 * Retrieve work items and do the writeback they describe 1057 struct wb_writeback_work *work; wb_do_writeback() local 1061 while ((work = get_next_work_item(bdi)) != NULL) { wb_do_writeback() 1063 trace_writeback_exec(bdi, work); wb_do_writeback() 1065 wrote += wb_writeback(wb, work); wb_do_writeback() 1069 * work item, otherwise just free it. wb_do_writeback() 1071 if (work->done) wb_do_writeback() 1072 complete(work->done); wb_do_writeback() 1074 kfree(work); wb_do_writeback() 1091 void bdi_writeback_workfn(struct work_struct *work) bdi_writeback_workfn() argument 1093 struct bdi_writeback *wb = container_of(to_delayed_work(work), bdi_writeback_workfn() 1157 * inodes on the system. So instead we define a separate delayed work 1418 * @reason: reason why some writeback work initiated 1429 struct wb_writeback_work work = { writeback_inodes_sb_nr() local 1441 bdi_queue_work(sb->s_bdi, &work); writeback_inodes_sb_nr() 1449 * @reason: reason why some writeback work was initiated 1489 * @reason: reason why some writeback work was initiated 1510 struct wb_writeback_work work = { sync_inodes_sb() local 1525 bdi_queue_work(sb->s_bdi, &work); sync_inodes_sb() 120 bdi_queue_work(struct backing_dev_info *bdi, struct wb_writeback_work *work) bdi_queue_work() argument 274 move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, int flags, struct wb_writeback_work *work) move_expired_inodes() argument 627 writeback_chunk_size(struct backing_dev_info *bdi, struct wb_writeback_work *work) writeback_chunk_size() argument 663 writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb, struct wb_writeback_work *work) writeback_sb_inodes() argument 780 __writeback_inodes_wb(struct bdi_writeback *wb, struct wb_writeback_work *work) __writeback_inodes_wb() argument 875 wb_writeback(struct bdi_writeback *wb, struct wb_writeback_work *work) wb_writeback() argument
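bdi_queue_work() above shows the producer side of a work-list protocol: callers either stack-allocate a wb_writeback_work carrying a completion (sync_inodes_sb) or kzalloc() one with GFP_ATOMIC and let the worker kfree() it, while wb_do_writeback() drains the list and signals work->done when present. A reduced sketch of the same hand-off, with invented names:

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_request {
	struct list_head list;
	struct completion *done;   /* NULL for fire-and-forget */
};

static LIST_HEAD(req_list);
static DEFINE_SPINLOCK(req_lock);

static void my_worker(struct work_struct *work)
{
	struct my_request *req;

	spin_lock(&req_lock);
	while (!list_empty(&req_list)) {
		req = list_first_entry(&req_list, struct my_request, list);
		list_del_init(&req->list);
		spin_unlock(&req_lock);

		/* ... perform the work the request describes ... */

		if (req->done)
			complete(req->done); /* waiter owns the memory */
		else
			kfree(req);          /* async: worker frees it */
		spin_lock(&req_lock);
	}
	spin_unlock(&req_lock);
}

static DECLARE_WORK(my_work, my_worker);

static void my_queue(struct my_request *req)
{
	spin_lock(&req_lock);
	list_add_tail(&req->list, &req_list);
	spin_unlock(&req_lock);
	schedule_work(&my_work);
}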
|
/linux-4.1.27/drivers/gpu/drm/exynos/ |
H A D | exynos_drm_ipp.h | 35 * A structure of command work information. 36 * @work: work structure. 37 * @ippdrv: current work ippdrv. 42 struct work_struct work; member in struct:drm_exynos_ipp_cmd_work 60 * @start_work: start command work structure. 61 * @stop_work: stop command work structure. 62 * @event_work: event work structure. 106 * A structure of event work information. 108 * @work: work structure. 109 * @ippdrv: current work ippdrv. 113 struct work_struct work; member in struct:drm_exynos_ipp_event_work 147 * @event_workq: event work queue. 156 * @sched_event: work schedule handler. 176 void (*sched_event)(struct work_struct *work); 193 extern void ipp_sched_cmd(struct work_struct *work); 194 extern void ipp_sched_event(struct work_struct *work);
|
/linux-4.1.27/drivers/pci/hotplug/ |
H A D | pciehp_ctrl.c | 38 static void interrupt_event_handler(struct work_struct *work); 50 INIT_WORK(&info->work, interrupt_event_handler); queue_interrupt_event() 52 queue_work(p_slot->wq, &info->work); queue_interrupt_event() 279 struct work_struct work; member in struct:power_work_info 287 * @work: &struct work_struct describing work to be done 292 static void pciehp_power_thread(struct work_struct *work) pciehp_power_thread() argument 295 container_of(work, struct power_work_info, work); pciehp_power_thread() 333 void pciehp_queue_pushbutton_work(struct work_struct *work) pciehp_queue_pushbutton_work() argument 335 struct slot *p_slot = container_of(work, struct slot, work.work); pciehp_queue_pushbutton_work() 345 INIT_WORK(&info->work, pciehp_power_thread); pciehp_queue_pushbutton_work() 361 queue_work(p_slot->wq, &info->work); pciehp_queue_pushbutton_work() 389 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); handle_button_press_event() 399 cancel_delayed_work(&p_slot->work); handle_button_press_event() 439 INIT_WORK(&info->work, pciehp_power_thread); handle_surprise_event() 450 queue_work(p_slot->wq, &info->work); handle_surprise_event() 469 INIT_WORK(&info->work, pciehp_power_thread); handle_link_event() 474 cancel_delayed_work(&p_slot->work); handle_link_event() 479 queue_work(p_slot->wq, &info->work); handle_link_event() 492 queue_work(p_slot->wq, &info->work); handle_link_event() 501 queue_work(p_slot->wq, &info->work); handle_link_event() 517 static void interrupt_event_handler(struct work_struct *work) interrupt_event_handler() argument 519 struct event_info *info = container_of(work, struct event_info, work); interrupt_event_handler() 630 cancel_delayed_work(&p_slot->work); pciehp_sysfs_enable_slot() 667 cancel_delayed_work(&p_slot->work); pciehp_sysfs_disable_slot()
|
H A D | shpchp_ctrl.c | 38 static void interrupt_event_handler(struct work_struct *work); 52 INIT_WORK(&info->work, interrupt_event_handler); queue_interrupt_event() 54 queue_work(p_slot->wq, &info->work); queue_interrupt_event() 393 struct work_struct work; member in struct:pushbutton_work_info 398 * @work: &struct work_struct to be handled 403 static void shpchp_pushbutton_thread(struct work_struct *work) shpchp_pushbutton_thread() argument 406 container_of(work, struct pushbutton_work_info, work); shpchp_pushbutton_thread() 432 void shpchp_queue_pushbutton_work(struct work_struct *work) shpchp_queue_pushbutton_work() argument 434 struct slot *p_slot = container_of(work, struct slot, work.work); shpchp_queue_pushbutton_work() 444 INIT_WORK(&info->work, shpchp_pushbutton_thread); shpchp_queue_pushbutton_work() 458 queue_work(p_slot->wq, &info->work); shpchp_queue_pushbutton_work() 506 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); handle_button_press_event() 517 cancel_delayed_work(&p_slot->work); handle_button_press_event() 544 static void interrupt_event_handler(struct work_struct *work) interrupt_event_handler() argument 546 struct event_info *info = container_of(work, struct event_info, work); interrupt_event_handler() 670 cancel_delayed_work(&p_slot->work); shpchp_sysfs_enable_slot() 705 cancel_delayed_work(&p_slot->work); shpchp_sysfs_disable_slot()
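Both hotplug drivers above arm a five-second grace period on the first button press with queue_delayed_work(..., 5*HZ) and treat a second press inside the window as an abort via cancel_delayed_work(). A sketch of that toggle; note the real drivers track this in a locked p_slot->state machine, for which the `armed` flag here is only a stand-in:

#include <linux/workqueue.h>

struct my_slot {
	struct workqueue_struct *wq;
	struct delayed_work work;   /* performs the real power change */
	bool armed;
};

static void my_button_press(struct my_slot *slot)
{
	if (!slot->armed) {
		/* first press: act after a 5 s grace period */
		queue_delayed_work(slot->wq, &slot->work, 5 * HZ);
		slot->armed = true;
	} else {
		/* second press inside the window cancels the action */
		cancel_delayed_work(&slot->work);
		slot->armed = false;
	}
}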
|
/linux-4.1.27/arch/um/kernel/skas/ |
H A D | Makefile | 9 # GCC hardened also auto-enables -fpic, but we need %ebx so it can't work ->
|
/linux-4.1.27/drivers/hwmon/ |
H A D | abx500.h | 44 * @work: delayed work scheduled to monitor temperature periodically 45 * @work_active: True if work is active 60 struct delayed_work work; member in struct:abx500_temp
|
/linux-4.1.27/fs/ncpfs/ |
H A D | ncp_fs_i.h | 13 * all the information we need to work with an inode after creation.
|
/linux-4.1.27/arch/hexagon/kernel/ |
H A D | Makefile | 11 # Modules required to work with the Hexagon Virtual Machine
|
/linux-4.1.27/arch/arm/mach-pxa/include/mach/ |
H A D | io.h | 13 * drivers out there that might just work if we fake them...
|
H A D | reset.h | 16 * @output: set gpio as output instead of input during normal work
|
/linux-4.1.27/arch/alpha/include/asm/ |
H A D | percpu.h | 6 * 32-bit displacement from the GP. Which doesn't work for per cpu
|
/linux-4.1.27/include/linux/mfd/ |
H A D | ds1wm.h | 8 /* should work if your bus devices recover*/
|
/linux-4.1.27/fs/afs/ |
H A D | cmservice.c | 151 static void SRXAFSCB_CallBack(struct work_struct *work) SRXAFSCB_CallBack() argument 153 struct afs_call *call = container_of(work, struct afs_call, work); SRXAFSCB_CallBack() 287 * work, even if the final ACK isn't received. afs_deliver_cb_callback() 310 INIT_WORK(&call->work, SRXAFSCB_CallBack); afs_deliver_cb_callback() 311 queue_work(afs_wq, &call->work); afs_deliver_cb_callback() 318 static void SRXAFSCB_InitCallBackState(struct work_struct *work) SRXAFSCB_InitCallBackState() argument 320 struct afs_call *call = container_of(work, struct afs_call, work); SRXAFSCB_InitCallBackState() 357 INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); afs_deliver_cb_init_call_back_state() 358 queue_work(afs_wq, &call->work); afs_deliver_cb_init_call_back_state() 388 INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); afs_deliver_cb_init_call_back_state3() 389 queue_work(afs_wq, &call->work); afs_deliver_cb_init_call_back_state3() 396 static void SRXAFSCB_Probe(struct work_struct *work) SRXAFSCB_Probe() argument 398 struct afs_call *call = container_of(work, struct afs_call, work); SRXAFSCB_Probe() 421 INIT_WORK(&call->work, SRXAFSCB_Probe); afs_deliver_cb_probe() 422 queue_work(afs_wq, &call->work); afs_deliver_cb_probe() 429 static void SRXAFSCB_ProbeUuid(struct work_struct *work) SRXAFSCB_ProbeUuid() argument 431 struct afs_call *call = container_of(work, struct afs_call, work); SRXAFSCB_ProbeUuid() 517 INIT_WORK(&call->work, SRXAFSCB_ProbeUuid); afs_deliver_cb_probe_uuid() 518 queue_work(afs_wq, &call->work); afs_deliver_cb_probe_uuid() 525 static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work) SRXAFSCB_TellMeAboutYourself() argument 528 struct afs_call *call = container_of(work, struct afs_call, work); SRXAFSCB_TellMeAboutYourself() 601 INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself); afs_deliver_cb_tell_me_about_yourself() 602 queue_work(afs_wq, &call->work); afs_deliver_cb_tell_me_about_yourself()
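Each incoming AFS RPC above gets its handler attached with INIT_WORK() and is pushed onto the dedicated afs_wq rather than a shared system queue, which keeps long-running callbacks from stalling unrelated work. A sketch of the dedicated-queue pattern, under invented names and an assumed WQ_UNBOUND policy:

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_call_handler(struct work_struct *work)
{
	/* ... process one call in process context ... */
}

static int my_wq_init(void)
{
	/* WQ_UNBOUND: this workload has no CPU-locality requirement */
	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_dispatch(struct work_struct *call_work)
{
	INIT_WORK(call_work, my_call_handler);
	queue_work(my_wq, call_work);
}

static void my_wq_exit(void)
{
	/* drains and waits for everything still queued on my_wq */
	destroy_workqueue(my_wq);
}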
|
/linux-4.1.27/drivers/extcon/ |
H A D | extcon-gpio.c | 41 struct delayed_work work; member in struct:gpio_extcon_data 46 static void gpio_extcon_work(struct work_struct *work) gpio_extcon_work() argument 50 container_of(to_delayed_work(work), struct gpio_extcon_data, gpio_extcon_work() 51 work); gpio_extcon_work() 63 queue_delayed_work(system_power_efficient_wq, &extcon_data->work, gpio_irq_handler() 134 INIT_DELAYED_WORK(&extcon_data->work, gpio_extcon_work); gpio_extcon_probe() 148 gpio_extcon_work(&extcon_data->work.work); gpio_extcon_probe() 157 cancel_delayed_work_sync(&extcon_data->work); gpio_extcon_remove() 171 &extcon_data->work, extcon_data->debounce_jiffies); gpio_extcon_resume()
|
/linux-4.1.27/drivers/leds/trigger/ |
H A D | ledtrig-gpio.c | 24 struct work_struct work; member in struct:gpio_trig_data 37 schedule_work(&gpio_data->work); gpio_trig_irq() 42 static void gpio_trig_work(struct work_struct *work) gpio_trig_work() argument 44 struct gpio_trig_data *gpio_data = container_of(work, gpio_trig_work() 45 struct gpio_trig_data, work); gpio_trig_work() 123 schedule_work(&gpio_data->work); gpio_trig_inverted_store() 150 flush_work(&gpio_data->work); gpio_trig_gpio_store() 202 INIT_WORK(&gpio_data->work, gpio_trig_work); gpio_trig_activate() 225 flush_work(&gpio_data->work); gpio_trig_deactivate()
|
/linux-4.1.27/arch/powerpc/platforms/cell/ |
H A D | cpufreq_spudemand.c | 38 struct delayed_work work; member in struct:spu_gov_info_struct 58 static void spu_gov_work(struct work_struct *work) spu_gov_work() argument 64 info = container_of(work, struct spu_gov_info_struct, work.work); spu_gov_work() 73 schedule_delayed_work_on(info->policy->cpu, &info->work, delay); spu_gov_work() 79 INIT_DEFERRABLE_WORK(&info->work, spu_gov_work); spu_gov_init_work() 80 schedule_delayed_work_on(info->policy->cpu, &info->work, delay); spu_gov_init_work() 85 cancel_delayed_work_sync(&info->work); spu_gov_cancel_work()
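spu_gov_work() above is the classic self-rearming governor loop: INIT_DEFERRABLE_WORK lets an idle CPU stay asleep past the deadline, schedule_delayed_work_on() pins sampling to the policy's CPU, and the handler re-queues itself. Stopping such a loop needs cancel_delayed_work_sync(), which also rejects the re-arm attempt made by a handler that is already running. A sketch with invented names:

#include <linux/workqueue.h>

struct my_gov_info {
	struct delayed_work work;
	unsigned int cpu;
	unsigned long delay;   /* sampling period in jiffies */
};

static void my_gov_work(struct work_struct *work)
{
	struct my_gov_info *info =
		container_of(work, struct my_gov_info, work.work);

	/* ... sample load and adjust the frequency ... */

	/* re-arm on the same CPU for the next sample */
	schedule_delayed_work_on(info->cpu, &info->work, info->delay);
}

static void my_gov_start(struct my_gov_info *info)
{
	INIT_DEFERRABLE_WORK(&info->work, my_gov_work);
	schedule_delayed_work_on(info->cpu, &info->work, info->delay);
}

static void my_gov_stop(struct my_gov_info *info)
{
	/* the _sync variant also defeats the self-re-arm race */
	cancel_delayed_work_sync(&info->work);
}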
|
/linux-4.1.27/fs/ocfs2/cluster/ |
H A D | tcp_internal.h | 114 * be attempted before canceling delayed connect work and flushing the 126 * quorum work */ 136 /* all of these sc work structs hold refs on the sc while they are 140 /* rx and connect work are generated from socket callbacks. sc 141 * shutdown removes the callbacks and then flushes the work queue */ 144 /* shutdown work is triggered in two ways. the simple way is 146 * the sc from the nn, and queues the work. in this case the 147 * work is single-shot. the work is also queued from a sock 148 * callback, though, and in this case the work will find the sc 150 * ends up triggering the shutdown work again, though nothing 151 * will be done in that second iteration. so work queue teardown 153 * on the work queue so that the shutdown work doesn't remove the
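The comment block above spells out the hard part of callback-driven work: the shutdown work can be queued twice (once from the unregister path, once from a socket callback), so it must be idempotent, and teardown has to detach the callbacks before flushing so nothing re-queues mid-flush. A condensed sketch of that ordering, with invented names standing in for the o2net details:

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_detach_callbacks(void)
{
	/* stop the socket from generating new work; a real driver would
	 * lock_sock() and restore sk_data_ready and friends here */
}

static void my_shutdown_work(struct work_struct *work)
{
	/* may run twice: a second invocation must find nothing to do */
}

static void my_teardown(void)
{
	my_detach_callbacks();    /* 1: no new work can be queued */
	flush_workqueue(my_wq);   /* 2: drain whatever is in flight */
	destroy_workqueue(my_wq); /* 3: now safe to tear the queue down */
}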
|
H A D | tcp.c | 114 * listen work is only queued by the listening socket callbacks on the 116 * quorum work is queued as sock containers are shutdown.. stop_listening 118 * and queued quorum work, before canceling delayed quorum work and 119 * destroying the work queue. 137 static void o2net_sc_connect_completed(struct work_struct *work); 138 static void o2net_rx_until_empty(struct work_struct *work); 139 static void o2net_shutdown_sc(struct work_struct *work); 141 static void o2net_sc_send_keep_req(struct work_struct *work); 475 struct work_struct *work) o2net_sc_queue_work() 478 if (!queue_work(o2net_wq, work)) o2net_sc_queue_work() 482 struct delayed_work *work, o2net_sc_queue_delayed_work() 486 if (!queue_delayed_work(o2net_wq, work, delay)) o2net_sc_queue_delayed_work() 490 struct delayed_work *work) o2net_sc_cancel_delayed_work() 492 if (cancel_delayed_work(work)) o2net_sc_cancel_delayed_work() 564 * the work queue actually being up. */ o2net_set_nn_state() 578 * Delay the expired work after idle timeout. o2net_set_nn_state() 581 * through here but we only cancel the connect_expired work when o2net_set_nn_state() 583 * the connect_expired work will do anything. The rest will see o2net_set_nn_state() 657 * we register callbacks so we can queue work on events before calling 720 * This work queue function performs the blocking parts of socket shutdown. A 727 static void o2net_shutdown_sc(struct work_struct *work) o2net_shutdown_sc() argument 730 container_of(work, struct o2net_sock_container, o2net_shutdown_sc() 739 * races with pending sc work structs are harmless */ o2net_shutdown_sc() 1436 /* this work func is triggered by data ready. it reads until it can read no 1438 * our work the work struct will be marked and we'll be called again. */ o2net_rx_until_empty() 1439 static void o2net_rx_until_empty(struct work_struct *work) o2net_rx_until_empty() argument 1442 container_of(work, struct o2net_sock_container, sc_rx_work); o2net_rx_until_empty() 1507 static void o2net_sc_connect_completed(struct work_struct *work) o2net_sc_connect_completed() argument 1510 container_of(work, struct o2net_sock_container, o2net_sc_connect_completed() 1523 static void o2net_sc_send_keep_req(struct work_struct *work) o2net_sc_send_keep_req() argument 1526 container_of(work, struct o2net_sock_container, o2net_sc_send_keep_req() 1527 sc_keepalive_work.work); o2net_sc_send_keep_req() 1590 /* this work func is kicked whenever a path sets the nn state which doesn't 1595 static void o2net_start_connect(struct work_struct *work) o2net_start_connect() argument 1598 container_of(work, struct o2net_node, nn_connect_work.work); o2net_start_connect() 1726 static void o2net_connect_expired(struct work_struct *work) o2net_connect_expired() argument 1729 container_of(work, struct o2net_node, nn_connect_expired.work); o2net_connect_expired() 1745 static void o2net_still_up(struct work_struct *work) o2net_still_up() argument 1748 container_of(work, struct o2net_node, nn_still_up.work); o2net_still_up() 1985 static void o2net_accept_many(struct work_struct *work) o2net_accept_many() argument 2099 * o2nm_this_node() doesn't work yet as we're being called while it 2127 /* again, o2nm_this_node() doesn't work here as we're involved in 2137 /* stop the listening socket from generating work */ o2net_stop_listening() 2151 /* finish all work and tear down the work queue */ o2net_stop_listening() 474 o2net_sc_queue_work(struct o2net_sock_container *sc, struct work_struct *work) o2net_sc_queue_work() argument 481 o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, struct delayed_work *work, int delay) o2net_sc_queue_delayed_work() argument 489 o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, struct delayed_work *work) o2net_sc_cancel_delayed_work() argument
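The o2net_sc_queue_work() helpers above pair every queue operation with a reference: a ref is taken before queueing and dropped again when queue_work() reports the work was already pending, and the cancel helper drops a ref only when it actually removed a pending work. A sketch of that invariant using kref, with invented names:

#include <linux/kref.h>
#include <linux/workqueue.h>

struct my_conn {
	struct kref ref;
	struct work_struct rx_work;
};

static void my_conn_release(struct kref *ref)
{
	/* last ref gone: kfree the container, close the socket, etc. */
}

static void my_conn_queue_rx(struct my_conn *conn)
{
	kref_get(&conn->ref);   /* this ref is owned by the queued work */
	if (!queue_work(system_wq, &conn->rx_work))
		kref_put(&conn->ref, my_conn_release); /* already queued */
}

static void my_rx_work(struct work_struct *work)
{
	struct my_conn *conn = container_of(work, struct my_conn, rx_work);

	/* ... receive until the socket is empty ... */

	kref_put(&conn->ref, my_conn_release); /* drop the queue's ref */
}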
|
/linux-4.1.27/drivers/iio/trigger/ |
H A D | iio-trig-sysfs.c | 20 struct irq_work work; member in struct:iio_sysfs_trig 94 static void iio_sysfs_trigger_work(struct irq_work *work) iio_sysfs_trigger_work() argument 96 struct iio_sysfs_trig *trig = container_of(work, struct iio_sysfs_trig, iio_sysfs_trigger_work() 97 work); iio_sysfs_trigger_work() 108 irq_work_queue(&sysfs_trig->work); iio_sysfs_trigger_poll() 166 init_irq_work(&t->work, iio_sysfs_trigger_work); iio_sysfs_trigger_probe()
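irq_work, used above, is the lighter cousin of a workqueue: init_irq_work() binds the callback once, and irq_work_queue() may be called from any context, including NMI, with the callback later running in hard-irq context, so it must not sleep. A sketch under invented names:

#include <linux/irq_work.h>
#include <linux/kernel.h>

static struct irq_work my_iw;

static void my_irq_work_fn(struct irq_work *work)
{
	/* hard-irq context: no sleeping, no mutexes */
	pr_info("poked via irq_work\n");
}

static void my_setup(void)
{
	init_irq_work(&my_iw, my_irq_work_fn);
}

static void my_poke(void)
{
	/* safe from NMI/IRQ context; duplicate queueing is coalesced */
	irq_work_queue(&my_iw);
}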
|
/linux-4.1.27/drivers/usb/chipidea/ |
H A D | otg.c | 88 * @work: work struct 90 static void ci_otg_work(struct work_struct *work) ci_otg_work() argument 92 struct ci_hdrc *ci = container_of(work, struct ci_hdrc, work); ci_otg_work() 120 INIT_WORK(&ci->work, ci_otg_work); ci_hdrc_otg_init()
|
H A D | otg.h | 23 queue_work(ci->wq, &ci->work); ci_otg_queue_work()
|
/linux-4.1.27/arch/sh/drivers/ |
H A D | push-switch.c | 33 schedule_work(&psw->work); switch_timer() 36 static void switch_work_handler(struct work_struct *work) switch_work_handler() argument 38 struct push_switch *psw = container_of(work, struct push_switch, work); switch_work_handler() 80 INIT_WORK(&psw->work, switch_work_handler); switch_drv_probe() 110 flush_work(&psw->work); switch_drv_remove()
|
/linux-4.1.27/net/mac802154/ |
H A D | tx.c | 38 struct work_struct work; member in struct:ieee802154_xmit_cb 44 static void ieee802154_xmit_worker(struct work_struct *work) ieee802154_xmit_worker() argument 47 container_of(work, struct ieee802154_xmit_cb, work); ieee802154_xmit_worker() 109 INIT_WORK(&ieee802154_xmit_cb.work, ieee802154_xmit_worker); ieee802154_tx() 113 queue_work(local->workqueue, &ieee802154_xmit_cb.work); ieee802154_tx()
|
/linux-4.1.27/drivers/net/wireless/ath/ath9k/ |
H A D | htc_drv_gpio.c | 60 * This is the master bt coex work which runs for every 64 static void ath_btcoex_period_work(struct work_struct *work) ath_btcoex_period_work() argument 66 struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv, ath_btcoex_period_work() 67 coex_period_work.work); ath_btcoex_period_work() 98 static void ath_btcoex_duty_cycle_work(struct work_struct *work) ath_btcoex_duty_cycle_work() argument 100 struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv, ath_btcoex_duty_cycle_work() 101 duty_cycle_work.work); ath_btcoex_duty_cycle_work() 106 ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n"); ath_btcoex_duty_cycle_work() 131 * (Re)start btcoex work 139 ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex work\n"); ath_htc_resume_btcoex_work() 150 * Cancel btcoex and bt duty cycle work. 226 void ath9k_led_work(struct work_struct *work) ath9k_led_work() argument 228 struct ath9k_htc_priv *priv = container_of(work, ath9k_led_work()
|
/linux-4.1.27/drivers/power/ |
H A D | jz4740-battery.c | 51 struct delayed_work work; member in struct:jz_battery 177 mod_delayed_work(system_wq, &jz_battery->work, 0); jz_battery_external_power_changed() 184 mod_delayed_work(system_wq, &jz_battery->work, 0); jz_battery_charge_irq() 231 static void jz_battery_work(struct work_struct *work) jz_battery_work() argument 235 struct jz_battery *jz_battery = container_of(work, struct jz_battery, jz_battery_work() 236 work.work); jz_battery_work() 239 schedule_delayed_work(&jz_battery->work, interval); jz_battery_work() 294 INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work); jz_battery_probe() 347 schedule_delayed_work(&jz_battery->work, 0); jz_battery_probe() 366 cancel_delayed_work_sync(&jz_battery->work); jz_battery_remove() 386 cancel_delayed_work_sync(&jz_battery->work); jz_battery_suspend() 396 schedule_delayed_work(&jz_battery->work, 0); jz_battery_resume()
|
H A D | max17040_battery.c | 42 struct delayed_work work; member in struct:max17040_chip 179 static void max17040_work(struct work_struct *work) max17040_work() argument 183 chip = container_of(work, struct max17040_chip, work.work); max17040_work() 190 queue_delayed_work(system_power_efficient_wq, &chip->work, max17040_work() 239 INIT_DEFERRABLE_WORK(&chip->work, max17040_work); max17040_probe() 240 queue_delayed_work(system_power_efficient_wq, &chip->work, max17040_probe() 251 cancel_delayed_work(&chip->work); max17040_remove() 262 cancel_delayed_work(&chip->work); max17040_suspend() 271 queue_delayed_work(system_power_efficient_wq, &chip->work, max17040_resume()
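max17040 above shows the power-friendly variant of the polling loop: INIT_DEFERRABLE_WORK plus system_power_efficient_wq lets an idle system batch the wakeups, suspend cancels the poll, and resume queues it again with zero delay to refresh state immediately. A sketch (MY_POLL_MS is an invented interval, not a chip constant):

#include <linux/workqueue.h>

#define MY_POLL_MS	8000	/* illustrative polling period */

struct my_chip {
	struct delayed_work work;
};

static void my_poll(struct work_struct *work)
{
	struct my_chip *chip =
		container_of(work, struct my_chip, work.work);

	/* ... read gauge registers, update reported status ... */

	queue_delayed_work(system_power_efficient_wq, &chip->work,
			   msecs_to_jiffies(MY_POLL_MS));
}

static void my_probe_poll(struct my_chip *chip)
{
	INIT_DEFERRABLE_WORK(&chip->work, my_poll);
	queue_delayed_work(system_power_efficient_wq, &chip->work,
			   msecs_to_jiffies(MY_POLL_MS));
}

static void my_suspend(struct my_chip *chip)
{
	cancel_delayed_work(&chip->work);
}

static void my_resume(struct my_chip *chip)
{
	/* delay 0: refresh the state as soon as we are back up */
	queue_delayed_work(system_power_efficient_wq, &chip->work, 0);
}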
|
H A D | ltc2941-battery-gauge.c | 64 struct delayed_work work; /* Work scheduler */ member in struct:ltc294x_info 351 static void ltc294x_work(struct work_struct *work) ltc294x_work() argument 355 info = container_of(work, struct ltc294x_info, work.work); ltc294x_work() 357 schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ); ltc294x_work() 372 cancel_delayed_work(&info->work); ltc294x_i2c_remove() 471 INIT_DELAYED_WORK(&info->work, ltc294x_work); ltc294x_i2c_probe() 486 schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ); ltc294x_i2c_probe() 510 cancel_delayed_work(&info->work); ltc294x_suspend() 519 schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ); ltc294x_resume()
|
H A D | da9030_battery.c | 98 struct delayed_work work; member in struct:da9030_charger 294 static void da9030_charging_monitor(struct work_struct *work) da9030_charging_monitor() argument 298 charger = container_of(work, struct da9030_charger, work.work); da9030_charging_monitor() 303 schedule_delayed_work(&charger->work, charger->interval); da9030_charging_monitor() 409 cancel_delayed_work_sync(&charger->work); da9030_battery_event() 410 schedule_work(&charger->work.work); da9030_battery_event() 532 INIT_DELAYED_WORK(&charger->work, da9030_charging_monitor); da9030_battery_probe() 533 schedule_delayed_work(&charger->work, charger->interval); da9030_battery_probe() 562 cancel_delayed_work(&charger->work); da9030_battery_probe() 577 cancel_delayed_work_sync(&charger->work); da9030_battery_remove()
|
/linux-4.1.27/drivers/media/pci/cx23885/ |
H A D | cx23885-ir.h | 24 void cx23885_ir_rx_work_handler(struct work_struct *work); 25 void cx23885_ir_tx_work_handler(struct work_struct *work);
|
H A D | cx23885-av.h | 21 void cx23885_av_work_handler(struct work_struct *work);
|
H A D | cx23885-ir.c | 33 void cx23885_ir_rx_work_handler(struct work_struct *work) cx23885_ir_rx_work_handler() argument 36 container_of(work, struct cx23885_dev, ir_rx_work); cx23885_ir_rx_work_handler() 56 void cx23885_ir_tx_work_handler(struct work_struct *work) cx23885_ir_tx_work_handler() argument 59 container_of(work, struct cx23885_dev, ir_tx_work); cx23885_ir_tx_work_handler()
|
H A D | cx23885-av.c | 23 void cx23885_av_work_handler(struct work_struct *work) cx23885_av_work_handler() argument 26 container_of(work, struct cx23885_dev, cx25840_work); cx23885_av_work_handler()
|
/linux-4.1.27/drivers/s390/char/ |
H A D | sclp_config.c | 29 static void sclp_cpu_capability_notify(struct work_struct *work) sclp_cpu_capability_notify() argument 44 static void __ref sclp_cpu_change_notify(struct work_struct *work) sclp_cpu_change_notify() argument
|
/linux-4.1.27/drivers/staging/speakup/ |
H A D | selection.c | 130 struct work_struct work; member in struct:speakup_paste_work 134 static void __speakup_paste_selection(struct work_struct *work) __speakup_paste_selection() argument 137 container_of(work, struct speakup_paste_work, work); __speakup_paste_selection() 171 .work = __WORK_INITIALIZER(speakup_paste_work.work, 181 schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work); speakup_paste_selection() 187 cancel_work_sync(&speakup_paste_work.work); speakup_cancel_paste()
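The speakup paste path above is one of the few users of a fully static work item: __WORK_INITIALIZER wires the handler at compile time (DECLARE_WORK is the usual shorthand), schedule_work_on(WORK_CPU_UNBOUND, ...) queues it without binding it to the calling CPU, and cancel_work_sync() gives a deterministic stop. A sketch with invented names:

#include <linux/workqueue.h>

static void my_paste_fn(struct work_struct *work)
{
	/* ... push buffered characters to the tty ... */
}

/* equivalent to a struct field initialized with __WORK_INITIALIZER() */
static DECLARE_WORK(my_paste_work, my_paste_fn);

static void my_start_paste(void)
{
	schedule_work_on(WORK_CPU_UNBOUND, &my_paste_work);
}

static void my_cancel_paste(void)
{
	cancel_work_sync(&my_paste_work);
}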
|
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvkm/core/ |
H A D | notify.h | 22 struct work_struct work; member in struct:nvkm_notify 31 int (*func)(struct nvkm_notify *), bool work,
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/core/ |
H A D | notify.c | 45 flush_work(¬ify->work); nvkm_notify_put() 84 nvkm_notify_work(struct work_struct *work) nvkm_notify_work() argument 86 struct nvkm_notify *notify = container_of(work, typeof(*notify), work); nvkm_notify_work() 109 schedule_work(¬ify->work); nvkm_notify_send() 133 int (*func)(struct nvkm_notify *), bool work, nvkm_notify_init() 146 if (ret = 0, work) { nvkm_notify_init() 147 INIT_WORK(¬ify->work, nvkm_notify_work); nvkm_notify_init() 132 nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event, int (*func)(struct nvkm_notify *), bool work, void *data, u32 size, u32 reply, struct nvkm_notify *notify) nvkm_notify_init() argument
|
/linux-4.1.27/drivers/char/tpm/ |
H A D | tpm-dev.c | 32 struct work_struct work; member in struct:file_priv 41 schedule_work(&priv->work); user_reader_timeout() 44 static void timeout_work(struct work_struct *work) timeout_work() argument 46 struct file_priv *priv = container_of(work, struct file_priv, work); timeout_work() 79 INIT_WORK(&priv->work, timeout_work); tpm_open() 94 flush_work(&priv->work); tpm_read() 165 flush_work(&priv->work); tpm_release()
|
/linux-4.1.27/arch/mips/include/asm/mach-loongson/ |
H A D | loongson_hwmon.h | 34 /* loongson_fan_policy works when fan work at FAN_AUTO_MODE */ 52 struct delayed_work work; member in struct:loongson_fan_policy
|
/linux-4.1.27/arch/arm/include/asm/ |
H A D | ftrace.h | 37 * return_address uses walk_stackframe to do its work. If both 39 * information. For this to work in the function tracer many functions would
|
/linux-4.1.27/drivers/gpu/drm/nouveau/ |
H A D | nouveau_fence.c | 203 struct work_struct work; member in struct:nouveau_fence_work 212 struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work); nouveau_fence_work_handler() local 213 work->func(work->data); nouveau_fence_work_handler() 214 kfree(work); nouveau_fence_work_handler() 219 struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb); nouveau_fence_work_cb() local 221 schedule_work(&work->work); nouveau_fence_work_cb() 228 struct nouveau_fence_work *work; nouveau_fence_work() local 233 work = kmalloc(sizeof(*work), GFP_KERNEL); nouveau_fence_work() 234 if (!work) { nouveau_fence_work() 244 INIT_WORK(&work->work, nouveau_fence_work_handler); nouveau_fence_work() 245 work->func = func; nouveau_fence_work() 246 work->data = data; nouveau_fence_work() 248 if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0) nouveau_fence_work() 253 kfree(work); nouveau_fence_work() 533 * This needs uevents to work correctly, but fence_add_callback relies on nouveau_fence_no_signaling()
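nouveau_fence_work() above is the one-shot, self-freeing shape: the work container is kmalloc()ed per request, the fence callback does nothing but schedule_work(), and the handler runs the payload and then kfree()s its own container, so no teardown path has to track it. A reduced sketch with invented names; a module using this shape still has to ensure all such works have run before unload:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_deferred {
	struct work_struct work;
	void (*func)(void *);
	void *data;
};

static void my_deferred_handler(struct work_struct *kwork)
{
	struct my_deferred *w =
		container_of(kwork, struct my_deferred, work);

	w->func(w->data);
	kfree(w);   /* the work frees its own container */
}

static int my_defer(void (*func)(void *), void *data)
{
	struct my_deferred *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return -ENOMEM;
	INIT_WORK(&w->work, my_deferred_handler);
	w->func = func;
	w->data = data;
	schedule_work(&w->work);
	return 0;
}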
|
/linux-4.1.27/drivers/gpu/drm/sti/ |
H A D | sti_drm_drv.h | 30 struct work_struct work; member in struct:sti_drm_private::__anon4408
|
/linux-4.1.27/include/uapi/linux/ |
H A D | reiserfs_fs.h | 20 lsattr(1) will work with us. */
|
H A D | udf_fs_i.h | 10 * Each contributing author retains all rights to their own work.
|
/linux-4.1.27/arch/sh/include/asm/ |
H A D | push-switch.h | 15 struct work_struct work; member in struct:push_switch
|
H A D | shmparam.h | 16 * for everyone, and work out the specifics from the probed cache descriptor.
|
/linux-4.1.27/arch/avr32/include/asm/ |
H A D | thread_info.h | 65 * - pending work-to-be-done flags are in LSW 91 /* work to do on interrupt/exception return */ 98 /* work to do on any return to userspace */ 100 /* work to do on return from debug mode */
|
/linux-4.1.27/arch/m68k/include/asm/ |
H A D | vga.h | 10 * This should make cirrusfb work again on Amiga
|
H A D | timex.h | 12 * to make ntp work best. For Coldfires, that's the main clock.
|
/linux-4.1.27/net/rds/ |
H A D | threads.c | 41 * work queues that execute in a connection managing thread. 139 void rds_connect_worker(struct work_struct *work) rds_connect_worker() argument 141 struct rds_connection *conn = container_of(work, struct rds_connection, c_conn_w.work); rds_connect_worker() 159 void rds_send_worker(struct work_struct *work) rds_send_worker() argument 161 struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work); rds_send_worker() 181 void rds_recv_worker(struct work_struct *work) rds_recv_worker() argument 183 struct rds_connection *conn = container_of(work, struct rds_connection, c_recv_w.work); rds_recv_worker() 203 void rds_shutdown_worker(struct work_struct *work) rds_shutdown_worker() argument 205 struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w); rds_shutdown_worker()
|
/linux-4.1.27/scripts/ |
H A D | extract-ikconfig | 5 # This will only work when the kernel was compiled with CONFIG_IKCONFIG. 7 # The obscure use of the "tr" filter is to work around older versions of 58 # That didn't work, so retry after decompression.
|
H A D | extract-vmlinux | 26 # The obscure use of the "tr" filter is to work around older versions of 54 # That didn't work, so retry after decompression.
|
/linux-4.1.27/drivers/net/wireless/mwifiex/ |
H A D | 11h.c | 119 /* This is DFS CAC work queue function. 120 * This delayed work emits CAC finished event for cfg80211 if 123 void mwifiex_dfs_cac_work_queue(struct work_struct *work) mwifiex_dfs_cac_work_queue() argument 127 container_of(work, struct delayed_work, work); mwifiex_dfs_cac_work_queue() 178 "Aborting delayed work for CAC.\n"); mwifiex_abort_cac() 187 * and also cancels ongoing delayed work. 260 /* This is work queue function for channel switch handling. 265 void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work) mwifiex_dfs_chan_sw_work_queue() argument 269 container_of(work, struct delayed_work, work); mwifiex_dfs_chan_sw_work_queue()
|
/linux-4.1.27/drivers/media/usb/tm6000/ |
H A D | tm6000-input.c | 66 struct delayed_work work; member in struct:tm6000_IR 208 schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_SUBMIT_DELAY)); tm6000_ir_urb_received() 226 schedule_delayed_work(&ir->work, msecs_to_jiffies(10)); tm6000_ir_urb_received() 229 static void tm6000_ir_handle_key(struct work_struct *work) tm6000_ir_handle_key() argument 231 struct tm6000_IR *ir = container_of(work, struct tm6000_IR, work.work); tm6000_ir_handle_key() 261 schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); tm6000_ir_handle_key() 264 static void tm6000_ir_int_work(struct work_struct *work) tm6000_ir_int_work() argument 266 struct tm6000_IR *ir = container_of(work, struct tm6000_IR, work.work); tm6000_ir_int_work() 282 schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_SUBMIT_DELAY)); tm6000_ir_int_work() 292 schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_INT_LED_DELAY)); tm6000_ir_int_work() 305 schedule_delayed_work(&ir->work, 0); tm6000_ir_start() 316 cancel_delayed_work_sync(&ir->work); tm6000_ir_stop() 375 schedule_delayed_work(&ir->work, msecs_to_jiffies(URB_SUBMIT_DELAY)); __tm6000_ir_int_start() 452 INIT_DELAYED_WORK(&ir->work, tm6000_ir_int_work); tm6000_ir_init() 457 INIT_DELAYED_WORK(&ir->work, tm6000_ir_handle_key); tm6000_ir_init()
|
/linux-4.1.27/drivers/misc/cxl/ |
H A D | file.c | 138 struct cxl_ioctl_start_work work; afu_ioctl_start_work() local 146 if (copy_from_user(&work, uwork, afu_ioctl_start_work() 162 if (work.reserved1 || work.reserved2 || work.reserved3 || afu_ioctl_start_work() 163 work.reserved4 || work.reserved5 || work.reserved6 || afu_ioctl_start_work() 164 (work.flags & ~CXL_START_WORK_ALL)) { afu_ioctl_start_work() 169 if (!(work.flags & CXL_START_WORK_NUM_IRQS)) afu_ioctl_start_work() 170 work.num_interrupts = ctx->afu->pp_irqs; afu_ioctl_start_work() 171 else if ((work.num_interrupts < ctx->afu->pp_irqs) || afu_ioctl_start_work() 172 (work.num_interrupts > ctx->afu->irqs_max)) { afu_ioctl_start_work() 176 if ((rc = afu_register_irqs(ctx, work.num_interrupts))) afu_ioctl_start_work() 179 if (work.flags & CXL_START_WORK_AMR) afu_ioctl_start_work() 180 amr = work.amr & mfspr(SPRN_UAMOR); afu_ioctl_start_work() 190 trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); afu_ioctl_start_work() 192 if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor, afu_ioctl_start_work()
|
/linux-4.1.27/drivers/gpu/drm/i915/ |
H A D | intel_fbc.c | 327 struct intel_fbc_work *work = intel_fbc_work_fn() local 329 struct intel_fbc_work, work); intel_fbc_work_fn() 330 struct drm_device *dev = work->crtc->dev; intel_fbc_work_fn() 334 if (work == dev_priv->fbc.fbc_work) { intel_fbc_work_fn() 336 * the prior work. intel_fbc_work_fn() 338 if (work->crtc->primary->fb == work->fb) { intel_fbc_work_fn() 339 dev_priv->display.enable_fbc(work->crtc); intel_fbc_work_fn() 341 dev_priv->fbc.crtc = to_intel_crtc(work->crtc); intel_fbc_work_fn() 342 dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id; intel_fbc_work_fn() 343 dev_priv->fbc.y = work->crtc->y; intel_fbc_work_fn() 350 kfree(work); intel_fbc_work_fn() 364 if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work)) intel_fbc_cancel_work() 368 /* Mark the work as no longer wanted so that if it does intel_fbc_cancel_work() 369 * wake-up (because the work was already running and waiting intel_fbc_cancel_work() 378 struct intel_fbc_work *work; intel_fbc_enable() local 387 work = kzalloc(sizeof(*work), GFP_KERNEL); intel_fbc_enable() 388 if (work == NULL) { intel_fbc_enable() 389 DRM_ERROR("Failed to allocate FBC work structure\n"); intel_fbc_enable() 394 work->crtc = crtc; intel_fbc_enable() 395 work->fb = crtc->primary->fb; intel_fbc_enable() 396 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); intel_fbc_enable() 398 dev_priv->fbc.fbc_work = work; intel_fbc_enable() 413 schedule_delayed_work(&work->work, msecs_to_jiffies(50)); intel_fbc_enable()
|
H A D | i915_gem_userptr.c | 41 struct work_struct work; member in struct:i915_mm_struct 72 obj->userptr.work = NULL; cancel_userptr() 241 * either has cancelled gup work queued and we need to i915_mmu_notifier_add() 467 __i915_mm_struct_free__worker(struct work_struct *work) __i915_mm_struct_free__worker() argument 469 struct i915_mm_struct *mm = container_of(work, typeof(*mm), work); __i915_mm_struct_free__worker() 484 INIT_WORK(&mm->work, __i915_mm_struct_free__worker); __i915_mm_struct_free() 485 schedule_work(&mm->work); __i915_mm_struct_free() 501 struct work_struct work; member in struct:get_pages_work 548 struct get_pages_work *work = container_of(_work, typeof(*work), work); __i915_gem_userptr_get_pages_worker() local 549 struct drm_i915_gem_object *obj = work->obj; __i915_gem_userptr_get_pages_worker() 567 ret = get_user_pages(work->task, mm, __i915_gem_userptr_get_pages_worker() 581 if (obj->userptr.work != &work->work) { __i915_gem_userptr_get_pages_worker() 591 obj->userptr.work = ERR_PTR(ret); __i915_gem_userptr_get_pages_worker() 599 put_task_struct(work->task); __i915_gem_userptr_get_pages_worker() 600 kfree(work); __i915_gem_userptr_get_pages_worker() 663 * obj->userptr.work = ERR_PTR. i915_gem_userptr_get_pages() 666 if (obj->userptr.work == NULL && i915_gem_userptr_get_pages() 668 struct get_pages_work *work; i915_gem_userptr_get_pages() local 670 work = kmalloc(sizeof(*work), GFP_KERNEL); i915_gem_userptr_get_pages() 671 if (work != NULL) { i915_gem_userptr_get_pages() 672 obj->userptr.work = &work->work; i915_gem_userptr_get_pages() 675 work->obj = obj; i915_gem_userptr_get_pages() 678 work->task = current; i915_gem_userptr_get_pages() 679 get_task_struct(work->task); i915_gem_userptr_get_pages() 681 INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker); i915_gem_userptr_get_pages() 682 schedule_work(&work->work); i915_gem_userptr_get_pages() 686 if (IS_ERR(obj->userptr.work)) { i915_gem_userptr_get_pages() 687 ret = PTR_ERR(obj->userptr.work); i915_gem_userptr_get_pages() 688 obj->userptr.work = NULL; i915_gem_userptr_get_pages() 695 obj->userptr.work = NULL; i915_gem_userptr_get_pages() 710 BUG_ON(obj->userptr.work != NULL); i915_gem_userptr_put_pages()
|
/linux-4.1.27/drivers/net/wireless/rt2x00/ |
H A D | rt2x00link.c | 244 * should never have to work with link tuners. rt2x00link_start_tuner() 262 &link->work, LINK_TUNE_INTERVAL); rt2x00link_start_tuner() 267 cancel_delayed_work_sync(&rt2x00dev->link.work); rt2x00link_stop_tuner() 317 static void rt2x00link_tuner(struct work_struct *work) rt2x00link_tuner() argument 320 container_of(work, struct rt2x00_dev, link.work.work); rt2x00link_tuner() 377 &link->work, LINK_TUNE_INTERVAL); rt2x00link_tuner() 396 static void rt2x00link_watchdog(struct work_struct *work) rt2x00link_watchdog() argument 399 container_of(work, struct rt2x00_dev, link.watchdog_work.work); rt2x00link_watchdog() 449 static void rt2x00link_agc(struct work_struct *work) rt2x00link_agc() argument 452 container_of(work, struct rt2x00_dev, link.agc_work.work); rt2x00link_agc() 470 static void rt2x00link_vcocal(struct work_struct *work) rt2x00link_vcocal() argument 473 container_of(work, struct rt2x00_dev, link.vco_work.work); rt2x00link_vcocal() 497 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner); rt2x00link_register()
|
/linux-4.1.27/drivers/isdn/mISDN/ |
H A D | timerdev.c | 39 u_int work; member in struct:mISDNtimerdev 64 dev->work = 0; mISDN_open() 115 while (list_empty(list) && (dev->work == 0)) { mISDN_read() 119 wait_event_interruptible(dev->wait, (dev->work || mISDN_read() 125 if (dev->work) mISDN_read() 126 dev->work = 0; mISDN_read() 153 if (dev->work || !list_empty(&dev->expired)) mISDN_poll() 156 printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__, mISDN_poll() 157 dev->work, list_empty(&dev->expired)); mISDN_poll() 182 dev->work = 1; misdn_add_timer()
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvif/ |
H A D | notify.c | 57 flush_work(¬ify->work); nvif_notify_put() 105 nvif_notify_work(struct work_struct *work) nvif_notify_work() argument 107 struct nvif_notify *notify = container_of(work, typeof(*notify), work); nvif_notify_work() 132 schedule_work(¬ify->work); nvif_notify() 168 int (*func)(struct nvif_notify *), bool work, u8 event, nvif_notify_init() 186 if (work) { nvif_notify_init() 187 INIT_WORK(¬ify->work, nvif_notify_work); nvif_notify_init() 233 bool work, u8 type, void *data, u32 size, u32 reply, nvif_notify_new() 238 int ret = nvif_notify_init(object, nvif_notify_del, func, work, nvif_notify_new() 167 nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *), int (*func)(struct nvif_notify *), bool work, u8 event, void *data, u32 size, u32 reply, struct nvif_notify *notify) nvif_notify_init() argument 232 nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *), bool work, u8 type, void *data, u32 size, u32 reply, struct nvif_notify **pnotify) nvif_notify_new() argument
|
/linux-4.1.27/drivers/gpu/drm/radeon/ |
H A D | radeon_mn.c | 46 struct work_struct work; member in struct:radeon_mn 64 * @work: previously scheduled work item 66 * Lazily destroys the notifier from a work item 68 static void radeon_mn_destroy(struct work_struct *work) radeon_mn_destroy() argument 70 struct radeon_mn *rmn = container_of(work, struct radeon_mn, work); radeon_mn_destroy() 100 * Schedule a work item to lazily destroy our notifier. 106 INIT_WORK(&rmn->work, radeon_mn_destroy); radeon_mn_release() 107 schedule_work(&rmn->work); radeon_mn_release()
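radeon_mn_release() above cannot free the notifier from mmu-notifier context, so it initializes an embedded work and lets schedule_work() perform the destruction later in process context. A sketch of deferring destruction out of a restricted context, with invented names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_notifier {
	struct work_struct work;
	/* ... locks, lists, mm pointer ... */
};

static void my_notifier_destroy(struct work_struct *work)
{
	struct my_notifier *n =
		container_of(work, struct my_notifier, work);

	/* process context: may take mutexes, unregister, and free */
	kfree(n);
}

static void my_notifier_release(struct my_notifier *n)
{
	/* called from a context where freeing directly is not allowed */
	INIT_WORK(&n->work, my_notifier_destroy);
	schedule_work(&n->work);
}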
|
H A D | radeon_irq_kms.c | 64 * radeon_hotplug_work_func - display hotplug work handler 66 * @work: work struct 68 * This is the hot plug event work handler (all asics). 69 * The work gets scheduled from the irq handler if there 74 static void radeon_hotplug_work_func(struct work_struct *work) radeon_hotplug_work_func() argument 76 struct radeon_device *rdev = container_of(work, struct radeon_device, radeon_hotplug_work_func() 92 static void radeon_dp_work_func(struct work_struct *work) radeon_dp_work_func() argument 94 struct radeon_device *rdev = container_of(work, struct radeon_device, radeon_dp_work_func() 200 /* MSIs don't work on AGP */ radeon_msi_ok() 221 /* HP RS690 only seems to work with MSIs. */ radeon_msi_ok() 227 /* Dell RS690 only seems to work with MSIs. */ radeon_msi_ok() 233 /* Dell RS690 only seems to work with MSIs. */ radeon_msi_ok() 239 /* Gateway RS690 only seems to work with MSIs. */ radeon_msi_ok() 256 /* APUs work fine with MSIs */ radeon_msi_ok() 271 * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics). 315 * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
|
/linux-4.1.27/arch/mips/cavium-octeon/executive/ |
H A D | cvmx-helper-util.c | 86 * @work: Work queue entry containing the packet to dump 89 int cvmx_helper_dump_packet(cvmx_wqe_t *work) cvmx_helper_dump_packet() argument 98 cvmx_dprintf("Packet Length: %u\n", work->len); cvmx_helper_dump_packet() 99 cvmx_dprintf(" Input Port: %u\n", work->ipprt); cvmx_helper_dump_packet() 100 cvmx_dprintf(" QoS: %u\n", work->qos); cvmx_helper_dump_packet() 101 cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs); cvmx_helper_dump_packet() 103 if (work->word2.s.bufs == 0) { cvmx_helper_dump_packet() 109 buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data); cvmx_helper_dump_packet() 110 if (likely(!work->word2.s.not_IP)) { cvmx_helper_dump_packet() 115 work->word2.s.ip_offset; cvmx_helper_dump_packet() 116 buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2; cvmx_helper_dump_packet() 129 buffer_ptr = work->packet_ptr; cvmx_helper_dump_packet() 130 remaining_bytes = work->len; cvmx_helper_dump_packet()
|
H A D | cvmx-interrupt-decodes.c | 80 /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ __cvmx_interrupt_gmxx_rxx_int_en_enable() 100 /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */ __cvmx_interrupt_gmxx_rxx_int_en_enable() 102 /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ __cvmx_interrupt_gmxx_rxx_int_en_enable() 125 /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ __cvmx_interrupt_gmxx_rxx_int_en_enable() 145 /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */ __cvmx_interrupt_gmxx_rxx_int_en_enable() 147 /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ __cvmx_interrupt_gmxx_rxx_int_en_enable() 167 /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */ __cvmx_interrupt_gmxx_rxx_int_en_enable() 169 /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ __cvmx_interrupt_gmxx_rxx_int_en_enable() 190 /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */ __cvmx_interrupt_gmxx_rxx_int_en_enable() 192 /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ __cvmx_interrupt_gmxx_rxx_int_en_enable() 221 /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */ __cvmx_interrupt_gmxx_rxx_int_en_enable()
|
/linux-4.1.27/drivers/net/wireless/ath/ath10k/ |
H A D | mac.h | 38 void ath10k_scan_timeout_work(struct work_struct *work); 40 void ath10k_offchan_tx_work(struct work_struct *work); 42 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
|
/linux-4.1.27/drivers/media/usb/au0828/ |
H A D | au0828-input.c | 42 struct delayed_work work; member in struct:au0828_rc 173 * will work, as we need to add a hack for each au0828_get_key_au8522() 230 static void au0828_rc_work(struct work_struct *work) au0828_rc_work() argument 232 struct au0828_rc *ir = container_of(work, struct au0828_rc, work.work); au0828_rc_work() 239 schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); au0828_rc_work() 246 INIT_DELAYED_WORK(&ir->work, au0828_rc_work); au0828_rc_start() 251 schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); au0828_rc_start() 260 cancel_delayed_work_sync(&ir->work); au0828_rc_stop() 389 cancel_delayed_work_sync(&ir->work); au0828_rc_suspend() 409 schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling)); au0828_rc_resume()
|
/linux-4.1.27/drivers/staging/fwserial/ |
H A D | fwserial.h | 65 * @work: only one work item can be queued at any one time 66 * Note: pending work is canceled prior to removal, so this 67 * peer is valid for at least the lifetime of the work function 68 * @work_params: parameter block for work functions 71 * @connect: work item for auto-connecting 95 struct work_struct work; member in struct:fwtty_peer 199 * @emit_breaks: delayed work responsible for generating breaks when the 203 * @hangup: work responsible for HUPing when carrier is dropped/lost 216 * @drain: delayed work scheduled to ensure that writes are flushed. 217 * The work can race with the writer but concurrent sending is 358 * is not necessary and does not work, because
|
/linux-4.1.27/drivers/oprofile/ |
H A D | cpu_buffer.c | 36 static void wq_sync_buffer(struct work_struct *work); 86 INIT_DELAYED_WORK(&b->work, wq_sync_buffer); for_each_possible_cpu() 105 * Spread the work by 1 jiffy per cpu so they dont all for_each_online_cpu() 108 schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i); for_each_online_cpu() 125 flush_delayed_work(&b->work); for_each_online_cpu() 450 static void wq_sync_buffer(struct work_struct *work) wq_sync_buffer() argument 453 container_of(work, struct oprofile_cpu_buffer, work.work); wq_sync_buffer() 455 cancel_delayed_work(&b->work); wq_sync_buffer() 460 /* don't re-add the work if we're shutting down */ wq_sync_buffer() 462 schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE); wq_sync_buffer()
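The oprofile buffer code above staggers its per-CPU flushes by adding the CPU index to the delay, so the CPUs do not all contend for the same sync moment, and each handler re-arms itself until shutdown cancels it. A sketch of the staggered per-CPU schedule (MY_SYNC_JIFFIES and the my_ names are illustrative):

#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

#define MY_SYNC_JIFFIES	(HZ / 10)	/* illustrative base period */

struct my_cpu_buf {
	struct delayed_work work;
};

static DEFINE_PER_CPU(struct my_cpu_buf, my_bufs);

static void my_sync_fn(struct work_struct *work)
{
	struct my_cpu_buf *b =
		container_of(work, struct my_cpu_buf, work.work);

	/* ... flush this CPU's buffer ... */

	/* re-arm for the next period */
	schedule_delayed_work(&b->work, MY_SYNC_JIFFIES);
}

static void my_start_all(void)
{
	int i;

	for_each_online_cpu(i) {
		struct my_cpu_buf *b = &per_cpu(my_bufs, i);

		INIT_DELAYED_WORK(&b->work, my_sync_fn);
		/* spread the work by one jiffy per CPU */
		schedule_delayed_work_on(i, &b->work, MY_SYNC_JIFFIES + i);
	}
}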
|
/linux-4.1.27/drivers/usb/misc/ |
H A D | appledisplay.c | 6 * Thanks to Caskey L. Dickson for his work with acdctl. 60 /* table of devices that work with this driver */ 81 struct delayed_work work; member in struct:appledisplay 125 queue_delayed_work(wq, &pdata->work, 0); appledisplay_complete() 195 static void appledisplay_work(struct work_struct *work) appledisplay_work() argument 198 container_of(work, struct appledisplay, work.work); appledisplay_work() 207 schedule_delayed_work(&pdata->work, HZ / 8); appledisplay_work() 249 INIT_DELAYED_WORK(&pdata->work, appledisplay_work); appledisplay_probe() 347 cancel_delayed_work(&pdata->work); appledisplay_disconnect() 370 printk(KERN_ERR "appledisplay: Could not create work queue\n"); appledisplay_init()
|
/linux-4.1.27/arch/s390/include/asm/ |
H A D | ccwgroup.h | 13 * @ungroup_work: work to be done when a ccwgroup notifier has action 39 * @complete: undo work done in @prepare 41 * @thaw: undo work done in @freeze
|
/linux-4.1.27/arch/c6x/include/asm/ |
H A D | thread_info.h | 83 * - pending work-to-be-done flags are in LSW 94 #define TIF_WORK_MASK 0x00007FFE /* work on irq/exception return */ 95 #define TIF_ALLWORK_MASK 0x00007FFF /* work on any return to u-space */
|
/linux-4.1.27/arch/cris/include/asm/ |
H A D | thread_info.h | 65 * - pending work-to-be-done flags are in LSW 80 #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 81 #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
/linux-4.1.27/tools/power/cpupower/debug/i386/ |
H A D | intel_gsic.c | 50 "doesn't work out of the box,\nyou may want to " main() 57 "doesn't work out of the box,\nyou may want to " main() 70 "work.\nFor this, you need to pass some arguments to " main()
|
/linux-4.1.27/drivers/isdn/hisax/ |
H A D | isdnl1.h | 32 void BChannel_bh(struct work_struct *work);
|
/linux-4.1.27/drivers/ps3/ |
H A D | vuart.h | 35 struct work_struct work; member in struct:ps3_vuart_work 37 struct ps3_system_bus_device *dev; /* to convert work to device */ 49 void (*work)(struct ps3_system_bus_device *); member in struct:ps3_vuart_port_driver 72 work); ps3_vuart_work_to_system_bus_dev()
|
/linux-4.1.27/drivers/net/wireless/ti/wl1251/ |
H A D | ps.c | 30 void wl1251_elp_work(struct work_struct *work) wl1251_elp_work() argument 35 dwork = container_of(work, struct delayed_work, work); wl1251_elp_work() 38 wl1251_debug(DEBUG_PSM, "elp work"); wl1251_elp_work()
|
/linux-4.1.27/arch/x86/kernel/ |
H A D | syscall_32.c | 28 * Smells like a compiler bug -- it doesn't work
|
H A D | syscall_64.c | 27 * Smells like a compiler bug -- it doesn't work
|
/linux-4.1.27/arch/x86/lib/ |
H A D | usercopy.c | 14 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
|
/linux-4.1.27/arch/um/drivers/ |
H A D | slirp.h | 9 * initializer doesn't work in slirp_kern.c:
|
/linux-4.1.27/arch/metag/include/asm/ |
H A D | thread_info.h | 102 * - pending work-to-be-done flags are in LSW 128 /* work to do in syscall trace */ 133 /* work to do on any return to u-space */ 139 /* work to do on interrupt/exception return */
|
/linux-4.1.27/arch/mips/include/asm/octeon/ |
H A D | cvmx-helper-util.h | 50 * @work: Work queue entry containing the packet to dump 53 extern int cvmx_helper_dump_packet(cvmx_wqe_t *work); 144 * Free the packet buffers contained in a work queue entry. 145 * The work queue entry is not freed. 147 * @work: Work queue entry with packet to free 149 static inline void cvmx_helper_free_packet_data(cvmx_wqe_t *work) cvmx_helper_free_packet_data() argument 156 number_buffers = work->word2.s.bufs; cvmx_helper_free_packet_data() 159 buffer_ptr = work->packet_ptr; cvmx_helper_free_packet_data() 170 if (cvmx_ptr_to_phys(work) == start_of_buffer) { cvmx_helper_free_packet_data()
|
H A D | cvmx-pow.h | 70 * The work queue entry from the order - NEVER tag switch from 78 * - NULL_NULL can be exited by a new work request. A NULL_SWITCH 122 * create an entirely new work queue entry 127 * just update the work queue pointer and grp for this PP 137 * - does nothing if the stored work queue pointer does not 157 * - does nothing if the stored work queue pointer does not 200 * The group that the work queue entry will be 242 * Address for new work request loads (did<2:0> == 0) 257 * If set, don't return load response until work is 302 * If set, get the work-queue pointer rather than 341 * the standard response for work-queue index (invalid 342 * if the work-queue entry is not on the deschedule 347 * If set, get the work-queue pointer rather than 435 * another piece of work before a POW entry can ever become 468 * Response to new work request loads 473 * Set when no new work queue entry was returned. * 474 * If there was de-scheduled work, the HW will 478 * - There was no work, or 480 * - There was no work that the HW could find. This 482 * in the original request, when there is work in 488 /* 36 in O1 -- the work queue pointer */ 1067 * associated with a QOS level when it reloads work 1074 * work. 1080 * work. 1085 * work on the DRAM input Q list selected by 1110 * associated with a QOS level when it reloads work 1117 * work. 1123 * work. 1128 * work on the DRAM input Q list selected by 1168 * these stores. Note the assumption that the work queue entry is 1233 /* if set, don't return load response until work is available */ 1281 * Get the POW WQE for this core. This returns the work queue 1341 * Synchronous work request. Requests work from the POW. 1345 * @wait: When set, call stalls until work becomes available, or times out. 1348 * Returns the WQE pointer from POW. Returns NULL if no work 1375 * Synchronous work request. Requests work from the POW. 1377 * requesting the new work. 1379 * @wait: When set, call stalls until work becomes available, or times out. 1382 * Returns the WQE pointer from POW. Returns NULL if no work 1390 /* Must not have a switch pending when requesting work */ cvmx_pow_work_request_sync() 1411 /* Must not have a switch pending when requesting work */ cvmx_pow_work_request_null_rd() 1425 * Asynchronous work request. Work is requested from the POW unit, 1435 * @wait: 1 to cause response to wait for work to become available (or 1455 * Asynchronous work request. Work is requested from the POW unit, 1458 * tag switch to complete before requesting the new work. 1464 * @wait: 1 to cause response to wait for work to become available (or 1473 /* Must not have a switch pending when requesting work */ cvmx_pow_work_request_async() 1479 * Gets result of asynchronous work request. Performs an IOBDMA sync 1486 * work was available. 1502 * Checks if a work queue entry pointer returned by a work 1503 * request is valid. It may be invalid due to no work 1506 * @wqe_ptr: pointer to a work queue entry returned by the POW 1509 * 1 if invalid (no work was returned) 1519 * function does NOT update the work queue entry in dram to match tag 1583 * function does NOT update the work queue entry in dram to match tag 1624 * function does NOT update the work queue entry in dram to match tag 1635 * @wqp: pointer to work queue entry to submit. This entry is 1637 * @tag: tag value to be assigned to work queue entry 1639 * @group: group value for the work queue entry. 1698 * function does NOT update the work queue entry in dram to match tag 1709 * @wqp: pointer to work queue entry to submit. This entry is updated 1711 * @tag: tag value to be assigned to work queue entry 1713 * @group: group value for the work queue entry. 1734 * work queue entry. This operation completes immediately, 1772 * work queue entry. This operation completes immediately, 1794 * Submits work to an input queue. This function updates the work 1796 * tag provided is for the work queue entry submitted, and is 1799 * @wqp: pointer to work queue entry to submit. This entry is 1801 * @tag: tag value to be assigned to work queue entry 1804 * @grp: group value for the work queue entry. 1832 * SYNC write to memory before the work submit. This is cvmx_pow_work_submit() 1841 * indicates which groups each core will accept work from. There are 1847 * Each 1 bit in the mask enables the core to accept work from 1939 * after the work (that has an ATOMIC tag) is re-scheduled. 1947 * @no_sched: Control whether this work queue entry will be rescheduled. 1948 * - 1 : don't schedule this work 1949 * - 0 : allow this work to be scheduled. 2022 * after the work (that has an ATOMIC tag) is re-scheduled. 2030 * @no_sched: Control whether this work queue entry will be rescheduled. 2031 * - 1 : don't schedule this work 2032 * - 0 : allow this work to be scheduled. 2041 /* Need to make sure any writes to the work queue entry are complete */ cvmx_pow_tag_sw_desched() 2053 * Deschedules the current work queue entry. 2055 * @no_sched: no schedule flag value to be set on the work queue 2075 /* Need to make sure any writes to the work queue entry are complete */ cvmx_pow_desched()
|
/linux-4.1.27/fs/squashfs/ |
H A D | file_cache.c | 5 * This work is licensed under the terms of the GNU GPL, version 2. See
|
/linux-4.1.27/include/linux/spi/ |
H A D | l4f00242t03.h | 5 * Based on Marek Vasut's work in lms283gf05.h
|
/linux-4.1.27/arch/mips/sni/ |
H A D | reset.c | 13 * and if it doesn't work, we do some other stupid things.
|
/linux-4.1.27/arch/blackfin/include/asm/ |
H A D | bug.h | 15 * Anything from 0x0001 to 0x000A (inclusive) will work
|
/linux-4.1.27/arch/arm/mach-shmobile/ |
H A D | sh-gpio.h | 20 * current gpio framework doesn't have
|
/linux-4.1.27/fs/dlm/ |
H A D | ast.h | 24 void dlm_callback_work(struct work_struct *work);
|
/linux-4.1.27/net/mac80211/ |
H A D | offchannel.c | 23 * the frames while we are doing off-channel work. This is optional 24 * because we *may* be doing work on-operating channel, and want our 210 static void ieee80211_hw_roc_start(struct work_struct *work) ieee80211_hw_roc_start() argument 213 container_of(work, struct ieee80211_local, hw_roc_start); ieee80211_hw_roc_start() 290 * queue the work struct again to avoid recursion ieee80211_start_next_roc() 297 ieee80211_queue_delayed_work(&local->hw, &roc->work, ieee80211_start_next_roc() 332 void ieee80211_sw_roc_work(struct work_struct *work) ieee80211_sw_roc_work() argument 335 container_of(work, struct ieee80211_roc_work, work.work); ieee80211_sw_roc_work() 390 ieee80211_queue_delayed_work(&local->hw, &roc->work, ieee80211_sw_roc_work() 421 static void ieee80211_hw_roc_done(struct work_struct *work) ieee80211_hw_roc_done() argument 424 container_of(work, struct ieee80211_local, hw_roc_done); ieee80211_hw_roc_done() 492 ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); ieee80211_roc_purge() 494 /* work will clean up etc */ ieee80211_roc_purge() 495 flush_delayed_work(&roc->work); ieee80211_roc_purge()
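
The container_of(work, struct ieee80211_roc_work, work.work) line above is the standard idiom for delayed work: the handler receives the inner work_struct embedded in struct delayed_work, so recovering the outer object steps through two members. The same idiom reduced to a sketch (struct my_roc is illustrative, not a mac80211 type):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_roc {
	struct delayed_work work;	/* embeds a work_struct, also named "work" */
	int remaining_ms;
};

static void my_roc_work(struct work_struct *work)
{
	/* hence the double step: outer delayed_work, then its inner .work */
	struct my_roc *roc = container_of(work, struct my_roc, work.work);

	if (roc->remaining_ms > 0) {
		roc->remaining_ms -= 10;
		schedule_delayed_work(&roc->work, msecs_to_jiffies(10));
	}
}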
|
/linux-4.1.27/sound/usb/hiface/ |
H A D | chip.h | 9 * The driver is based on the work done in TerraTec DMX 6Fire USB
|
H A D | pcm.h | 9 * The driver is based on the work done in TerraTec DMX 6Fire USB
|
/linux-4.1.27/include/asm-generic/ |
H A D | scatterlist.h | 22 * You should only work with the number of sg entries pci_map_sg
|
H A D | unaligned.h | 6 * and should work almost anywhere.
|
/linux-4.1.27/drivers/scsi/fcoe/ |
H A D | fcoe_sysfs.c | 580 "ERROR: FIP Ctlr '%d' attempted to flush work, " fcoe_ctlr_device_flush_work() 590 * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue 592 * @work: Work to queue for execution 598 struct work_struct *work) fcoe_ctlr_device_queue_work() 602 "ERROR: FIP Ctlr '%d' attempted to queue work, " fcoe_ctlr_device_queue_work() 609 return queue_work(fcoe_ctlr_work_q(ctlr), work); fcoe_ctlr_device_queue_work() 620 "ERROR: FIP Ctlr '%d' attempted to flush work, " fcoe_ctlr_device_flush_devloss() 630 * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue 632 * @work: Work to queue for execution 633 * @delay: jiffies to delay the work queuing 639 struct delayed_work *work, fcoe_ctlr_device_queue_devloss_work() 644 "ERROR: FIP Ctlr '%d' attempted to queue work, " fcoe_ctlr_device_queue_devloss_work() 651 return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay); fcoe_ctlr_device_queue_devloss_work() 743 * are freed (work q), but the memory is not freed 780 * @work: The FIP fcf's embedded work struct 785 static void fcoe_fcf_device_final_delete(struct work_struct *work) fcoe_fcf_device_final_delete() argument 788 container_of(work, struct fcoe_fcf_device, delete_work); fcoe_fcf_device_final_delete() 804 * @work: The FIP fcf's embedded work struct 809 static void fip_timeout_deleted_fcf(struct work_struct *work) fip_timeout_deleted_fcf() argument 812 container_of(work, struct fcoe_fcf_device, dev_loss_work.work); fip_timeout_deleted_fcf() 597 fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr, struct work_struct *work) fcoe_ctlr_device_queue_work() argument 638 fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr, struct delayed_work *work, unsigned long delay) fcoe_ctlr_device_queue_devloss_work() argument
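
Both queue helpers above validate the controller's private workqueue before queueing and log an error if it is gone. A hedged sketch of that guard, with illustrative names in place of the fcoe types:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

struct my_ctlr {
	struct workqueue_struct *wq;	/* private queue; may be torn down */
};

static int my_ctlr_queue_work(struct my_ctlr *ctlr, struct work_struct *work)
{
	if (unlikely(!ctlr->wq)) {
		pr_err("my_ctlr: attempted to queue work, no workqueue\n");
		return -EINVAL;
	}
	return queue_work(ctlr->wq, work);
}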
|
/linux-4.1.27/drivers/vhost/ |
H A D | vhost.c | 9 * This work is licensed under the terms of the GNU GPL, version 2. 61 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn) vhost_work_init() argument 63 INIT_LIST_HEAD(&work->node); vhost_work_init() 64 work->fn = fn; vhost_work_init() 65 init_waitqueue_head(&work->done); vhost_work_init() 66 work->flushing = 0; vhost_work_init() 67 work->queue_seq = work->done_seq = 0; vhost_work_init() 81 vhost_work_init(&poll->work, fn); vhost_poll_init() 119 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work, vhost_work_seq_done() argument 125 left = seq - work->done_seq; vhost_work_seq_done() 130 void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work) vhost_work_flush() argument 136 seq = work->queue_seq; vhost_work_flush() 137 work->flushing++; vhost_work_flush() 139 wait_event(work->done, vhost_work_seq_done(dev, work, seq)); vhost_work_flush() 141 flushing = --work->flushing; vhost_work_flush() 147 /* Flush any work that has been scheduled. When calling this, don't hold any 151 vhost_work_flush(poll->dev, &poll->work); vhost_poll_flush() 155 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) vhost_work_queue() argument 160 if (list_empty(&work->node)) { vhost_work_queue() 161 list_add_tail(&work->node, &dev->work_list); vhost_work_queue() 162 work->queue_seq++; vhost_work_queue() 173 vhost_work_queue(poll->dev, &poll->work); vhost_poll_queue() 207 struct vhost_work *work = NULL; vhost_worker() local 219 if (work) { vhost_worker() 220 work->done_seq = seq; vhost_worker() 221 if (work->flushing) vhost_worker() 222 wake_up_all(&work->done); vhost_worker() 231 work = list_first_entry(&dev->work_list, vhost_worker() 233 list_del_init(&work->node); vhost_worker() 234 seq = work->queue_seq; vhost_worker() 236 work = NULL; vhost_worker() 239 if (work) { vhost_worker() 241 work->fn(work); vhost_worker() 335 struct vhost_work work; member in struct:vhost_attach_cgroups_struct 340 static void vhost_attach_cgroups_work(struct vhost_work *work) vhost_attach_cgroups_work() argument 344 s = container_of(work, struct vhost_attach_cgroups_struct, work); vhost_attach_cgroups_work() 353 vhost_work_init(&attach.work, vhost_attach_cgroups_work); vhost_attach_cgroups() 354 vhost_work_queue(dev, &attach.work); vhost_attach_cgroups() 355 vhost_work_flush(dev, &attach.work); vhost_attach_cgroups()
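
vhost_attach_cgroups() above shows the intended lifecycle for a one-shot item: init, queue, flush, which keeps an on-stack vhost_work valid until the worker thread has run it; the flush itself is built on the queue_seq/done_seq pair and the work->done waitqueue. A sketch of the same sequence with a hypothetical payload (vhost.h is the driver-local header excerpted below):

#include "vhost.h"

struct my_sync_work {
	struct vhost_work work;
	int result;
};

static void my_sync_fn(struct vhost_work *work)
{
	struct my_sync_work *s = container_of(work, struct my_sync_work, work);

	s->result = 0;	/* runs in the vhost worker's context */
}

static int my_run_on_worker(struct vhost_dev *dev)
{
	struct my_sync_work s;

	vhost_work_init(&s.work, my_sync_fn);
	vhost_work_queue(dev, &s.work);
	vhost_work_flush(dev, &s.work);	/* wait until done_seq catches up */
	return s.result;
}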
|
H A D | vhost.h | 16 typedef void (*vhost_work_fn_t)(struct vhost_work *work); 33 struct vhost_work work; member in struct:vhost_poll 38 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn); 39 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work); 47 void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
|
/linux-4.1.27/drivers/media/pci/cx18/ |
H A D | cx18-streams.h | 51 /* Put mdl on q_free; the out work handler will move mdl(s) to q_busy */ cx18_stream_put_mdl_fw() 56 void cx18_out_work_handler(struct work_struct *work);
|
/linux-4.1.27/drivers/message/fusion/lsi/ |
H A D | mpi_type.h | 18 * 11-02-00 01.01.01 Original release for post 1.0 work 20 * 08-08-01 01.02.01 Original release for v1.2 work.
|
/linux-4.1.27/drivers/net/wireless/rtl818x/rtl8187/ |
H A D | leds.c | 25 static void led_turn_on(struct work_struct *work) led_turn_on() argument 28 * be run from a work queue. led_turn_on() 31 struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, led_turn_on() 32 led_on.work); led_turn_on() 63 static void led_turn_off(struct work_struct *work) led_turn_off() argument 66 * be run from a work queue. led_turn_off() 69 struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, led_turn_off() 70 led_off.work); led_turn_off()
|
/linux-4.1.27/drivers/input/ |
H A D | input-polldev.c | 33 queue_delayed_work(system_freezable_wq, &dev->work, delay); input_polldev_queue_work() 36 static void input_polled_device_work(struct work_struct *work) input_polled_device_work() argument 39 container_of(work, struct input_polled_dev, work.work); input_polled_device_work() 65 cancel_delayed_work_sync(&dev->work); input_close_polled_device() 105 cancel_delayed_work_sync(&polldev->work); input_polldev_set_poll() 311 INIT_DELAYED_WORK(&dev->work, input_polled_device_work); input_register_polled_device()
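
Seen from a driver, the core above reduces polling to one callback plus an interval; the freezable delayed work stays internal. A minimal sketch of a consumer of this API (device specifics are hypothetical):

#include <linux/errno.h>
#include <linux/input.h>
#include <linux/input-polldev.h>

static void my_poll(struct input_polled_dev *dev)
{
	/* read the hardware, then report through dev->input as usual */
	input_report_key(dev->input, KEY_POWER, 0);
	input_sync(dev->input);
}

static int my_setup(void)
{
	struct input_polled_dev *poll_dev = input_allocate_polled_device();

	if (!poll_dev)
		return -ENOMEM;
	poll_dev->poll = my_poll;
	poll_dev->poll_interval = 100;			/* ms between poll() calls */
	poll_dev->input->name = "my-polled-dev";	/* illustrative */
	return input_register_polled_device(poll_dev);
}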
|
/linux-4.1.27/arch/x86/vdso/vdso32/ |
H A D | note.S | 10 doesn't work. Remember to change this when changing the 26 * corresponding to the bits here is needed to make ldconfig work right.
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ |
H A D | temp.c | 80 nv_poweroff_work(struct work_struct *work) nv_poweroff_work() argument 83 kfree(work); nv_poweroff_work() 125 struct work_struct *work; nvkm_therm_sensor_event() local 127 work = kmalloc(sizeof(*work), GFP_ATOMIC); nvkm_therm_sensor_event() 128 if (work) { nvkm_therm_sensor_event() 129 INIT_WORK(work, nv_poweroff_work); nvkm_therm_sensor_event() 130 schedule_work(work); nvkm_therm_sensor_event()
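
nvkm_therm_sensor_event() above cannot block, so it allocates the work_struct itself with GFP_ATOMIC and lets the handler kfree() it once the one-shot has run. The pattern in isolation:

#include <linux/slab.h>
#include <linux/workqueue.h>

static void my_oneshot(struct work_struct *work)
{
	/* ... act on the event ... */
	kfree(work);	/* the handler owns and releases the allocation */
}

static void my_event_from_atomic_context(void)
{
	struct work_struct *work = kmalloc(sizeof(*work), GFP_ATOMIC);

	if (work) {
		INIT_WORK(work, my_oneshot);
		schedule_work(work);
	}
}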
|
/linux-4.1.27/drivers/bluetooth/ |
H A D | bcm203x.c | 68 struct work_struct work; member in struct:bcm203x_data 103 schedule_work(&data->work); bcm203x_complete() 156 static void bcm203x_work(struct work_struct *work) bcm203x_work() argument 159 container_of(work, struct bcm203x_data, work); bcm203x_work() 243 INIT_WORK(&data->work, bcm203x_work); bcm203x_probe() 248 schedule_work(&data->work); bcm203x_probe() 260 cancel_work_sync(&data->work); bcm203x_disconnect()
|
/linux-4.1.27/drivers/hid/ |
H A D | hid-elo.c | 31 struct delayed_work work; member in struct:elo_priv 128 static void elo_work(struct work_struct *work) elo_work() argument 130 struct elo_priv *priv = container_of(work, struct elo_priv, work.work); elo_work() 175 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); elo_work() 229 INIT_DELAYED_WORK(&priv->work, elo_work); elo_probe() 248 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); elo_probe()
|
H A D | hid-gt683r.c | 58 struct work_struct work; member in struct:gt683r_led 83 schedule_work(&led->work); gt683r_brightness_set() 129 schedule_work(&led->work); mode_store() 190 static void gt683r_led_work(struct work_struct *work) gt683r_led_work() argument 195 struct gt683r_led *led = container_of(work, struct gt683r_led, work); gt683r_led_work() 248 INIT_WORK(&led->work, gt683r_led_work); gt683r_led_probe() 306 flush_work(&led->work); gt683r_led_remove()
|
/linux-4.1.27/drivers/usb/gadget/udc/ |
H A D | gadget_chips.h | 6 * This SHOULD eventually work like the ARM mach_is_*() stuff, driven by 38 * gadget_supports_altsettings - return true if altsettings work
|
/linux-4.1.27/drivers/video/ |
H A D | of_videomode.c | 27 * only one videomode is to be retrieved. A driver that needs to work 28 * with multiple/all videomodes should work with
|
/linux-4.1.27/arch/score/include/uapi/asm/ |
H A D | ptrace.h | 9 #define BREAKPOINT16_INSN 0x7002 /* work on SPG300 */ 10 #define BREAKPOINT32_INSN 0x84048000 /* work on SPG300 */
|
/linux-4.1.27/arch/arm/mach-omap2/ |
H A D | omap2-restart.c | 29 * omap2xxx_restart - Set DPLL to bypass mode for reboot to work 50 * operation to work - see omap2xxx_restart(). Returns -EINVAL upon
|
/linux-4.1.27/arch/arm/nwfpe/ |
H A D | milieu.h | 8 Written by John R. Hauser. This work was made possible in part by the 25 (1) they include prominent notice that the work is derivative, and (2) they
|
/linux-4.1.27/net/core/ |
H A D | utils.c | 338 struct work_struct work; member in struct:__net_random_once_work 344 struct __net_random_once_work *work = __net_random_once_deferred() local 345 container_of(w, struct __net_random_once_work, work); __net_random_once_deferred() 346 BUG_ON(!static_key_enabled(work->key)); __net_random_once_deferred() 347 static_key_slow_dec(work->key); __net_random_once_deferred() 348 kfree(work); __net_random_once_deferred() 359 INIT_WORK(&w->work, __net_random_once_deferred); __net_random_once_disable_jump() 361 schedule_work(&w->work); __net_random_once_disable_jump()
|
/linux-4.1.27/include/crypto/ |
H A D | mcryptd.h | 29 struct work_struct work; member in struct:mcryptd_cpu_queue 70 void mcryptd_flusher(struct work_struct *work);
|
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/ |
H A D | btcoex.c | 72 * @work: DHCP state machine work 93 struct work_struct work; member in struct:brcmf_btcoex_info 289 schedule_work(&bt_local->work); brcmf_btcoex_timerfunc() 293 * brcmf_btcoex_handler() - BT coex state machine work handler 294 * @work: work 296 static void brcmf_btcoex_handler(struct work_struct *work) brcmf_btcoex_handler() argument 299 btci = container_of(work, struct brcmf_btcoex_info, work); brcmf_btcoex_handler() 392 INIT_WORK(&btci->work, brcmf_btcoex_handler); brcmf_btcoex_attach() 414 cancel_work_sync(&cfg->btcoex->work); brcmf_btcoex_detach() 434 schedule_work(&btci->work); brcmf_btcoex_dhcp_start() 451 schedule_work(&btci->work); brcmf_btcoex_dhcp_end()
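
brcmf_btcoex_timerfunc() drives the state machine by scheduling the work item above, because timer callbacks run in atomic context and the handler needs process context. The handoff, sketched with the 4.1-era timer API (names illustrative):

#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_sm {
	struct timer_list timer;
	struct work_struct work;
};

static void my_sm_work(struct work_struct *work)
{
	struct my_sm *sm = container_of(work, struct my_sm, work);

	/* run the state machine in process context */
	(void)sm;
}

static void my_timer_fn(unsigned long data)
{
	struct my_sm *sm = (struct my_sm *)data;

	schedule_work(&sm->work);	/* defer out of atomic context */
}

static void my_sm_init(struct my_sm *sm)
{
	INIT_WORK(&sm->work, my_sm_work);
	setup_timer(&sm->timer, my_timer_fn, (unsigned long)sm);
}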
|
/linux-4.1.27/sound/i2c/other/ |
H A D | ak4113.c | 40 static void ak4113_stats(struct work_struct *work); 59 atomic_inc(&chip->wq_processing); /* don't schedule new work */ snd_ak4113_free() 60 cancel_delayed_work_sync(&chip->work); snd_ak4113_free() 90 INIT_DELAYED_WORK(&chip->work, ak4113_stats); snd_ak4113_create() 144 cancel_delayed_work_sync(&chip->work); snd_ak4113_reinit() 150 schedule_delayed_work(&chip->work, HZ / 10); snd_ak4113_reinit() 524 schedule_delayed_work(&ak4113->work, HZ / 10); snd_ak4113_build() 632 static void ak4113_stats(struct work_struct *work) ak4113_stats() argument 634 struct ak4113 *chip = container_of(work, struct ak4113, work.work); ak4113_stats() 640 schedule_delayed_work(&chip->work, HZ / 10); ak4113_stats() 646 atomic_inc(&chip->wq_processing); /* don't schedule new work */ snd_ak4113_suspend() 647 cancel_delayed_work_sync(&chip->work); snd_ak4113_suspend()
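
Both AK codec drivers here stop their self-rescheduling stats work the same way: bump wq_processing so the handler will not re-queue itself, then cancel_delayed_work_sync() to wait out any instance already running. The gate in outline (names illustrative):

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_chip {
	atomic_t stopping;		/* >0 means: do not reschedule */
	struct delayed_work work;
};

static void my_stats(struct work_struct *work)
{
	struct my_chip *chip = container_of(work, struct my_chip, work.work);

	/* ... gather stats ... */
	if (atomic_read(&chip->stopping) == 0)
		schedule_delayed_work(&chip->work, HZ / 10);
}

static void my_stop(struct my_chip *chip)
{
	atomic_inc(&chip->stopping);		/* forbid rescheduling */
	cancel_delayed_work_sync(&chip->work);	/* also catches a re-queue
						 * that raced with the inc */
}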
|
H A D | ak4114.c | 39 static void ak4114_stats(struct work_struct *work); 69 atomic_inc(&chip->wq_processing); /* don't schedule new work */ snd_ak4114_free() 70 cancel_delayed_work_sync(&chip->work); snd_ak4114_free() 101 INIT_DELAYED_WORK(&chip->work, ak4114_stats); snd_ak4114_create() 159 cancel_delayed_work_sync(&chip->work); snd_ak4114_reinit() 165 schedule_delayed_work(&chip->work, HZ / 10); snd_ak4114_reinit() 509 schedule_delayed_work(&ak4114->work, HZ / 10); snd_ak4114_build() 618 static void ak4114_stats(struct work_struct *work) ak4114_stats() argument 620 struct ak4114 *chip = container_of(work, struct ak4114, work.work); ak4114_stats() 625 schedule_delayed_work(&chip->work, HZ / 10); ak4114_stats() 631 atomic_inc(&chip->wq_processing); /* don't schedule new work */ snd_ak4114_suspend() 632 cancel_delayed_work_sync(&chip->work); snd_ak4114_suspend()
|
/linux-4.1.27/drivers/staging/i2o/ |
H A D | exec-osm.c | 61 struct work_struct work; /* work struct */ member in struct:i2o_exec_lct_notify_work 423 * @_work: work struct for a specific controller 431 struct i2o_exec_lct_notify_work *work = i2o_exec_lct_modified() local 432 container_of(_work, struct i2o_exec_lct_notify_work, work); i2o_exec_lct_modified() 434 struct i2o_controller *c = work->c; i2o_exec_lct_modified() 436 kfree(work); i2o_exec_lct_modified() 489 struct i2o_exec_lct_notify_work *work; i2o_exec_reply() local 493 work = kmalloc(sizeof(*work), GFP_ATOMIC); i2o_exec_reply() 494 if (!work) i2o_exec_reply() 497 work->c = c; i2o_exec_reply() 499 INIT_WORK(&work->work, i2o_exec_lct_modified); i2o_exec_reply() 500 queue_work(i2o_exec_driver.event_queue, &work->work); i2o_exec_reply() 519 * @work: Work item in occurring event 524 static void i2o_exec_event(struct work_struct *work) i2o_exec_event() argument 526 struct i2o_event *evt = container_of(work, struct i2o_event, work); i2o_exec_event()
|
/linux-4.1.27/block/ |
H A D | blk-iopoll.c | 87 int work, weight; blk_iopoll_softirq() local 107 work = 0; blk_iopoll_softirq() 109 work = iop->poll(iop, weight); blk_iopoll_softirq() 111 budget -= work; blk_iopoll_softirq() 123 if (work >= weight) { blk_iopoll_softirq()
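
The softirq above runs each iopoll instance with a per-instance weight against a global budget, the same accounting NAPI uses: the driver's poll() reports how many completions it consumed, and a full-weight return means it is not yet done. A hedged sketch of a driver-side callback honoring that contract (the my_hw_* helpers are hypothetical):

#include <linux/blk-iopoll.h>

static bool my_hw_completion_pending(void);	/* hypothetical */
static int my_hw_process_one(void);		/* hypothetical, returns 1 */

static int my_iopoll(struct blk_iopoll *iop, int budget)
{
	int done = 0;

	/* consume at most "budget" completions and report the count */
	while (done < budget && my_hw_completion_pending())
		done += my_hw_process_one();
	if (done < budget)
		blk_iopoll_complete(iop);	/* done: re-enable interrupts */
	return done;
}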
|
/linux-4.1.27/arch/powerpc/platforms/powermac/ |
H A D | backlight.c | 22 static void pmac_backlight_key_worker(struct work_struct *work); 23 static void pmac_backlight_set_legacy_worker(struct work_struct *work); 100 static void pmac_backlight_key_worker(struct work_struct *work) pmac_backlight_key_worker() argument 133 /* we can receive multiple interrupts here, but the scheduled work pmac_backlight_key() 167 static void pmac_backlight_set_legacy_worker(struct work_struct *work) pmac_backlight_set_legacy_worker() argument
|
/linux-4.1.27/drivers/misc/mic/host/ |
H A D | mic_boot.c | 240 * @work: The work structure. 242 * This work is scheduled whenever the host has received a shutdown 245 void mic_shutdown_work(struct work_struct *work) mic_shutdown_work() argument 247 struct mic_device *mdev = container_of(work, struct mic_device, mic_shutdown_work() 268 * @work: The work structure. 270 * This work is scheduled whenever the host wants to reset the MIC. 272 void mic_reset_trigger_work(struct work_struct *work) mic_reset_trigger_work() argument 274 struct mic_device *mdev = container_of(work, struct mic_device, mic_reset_trigger_work()
|
/linux-4.1.27/drivers/input/keyboard/ |
H A D | qt2160.c | 61 struct work_struct work; member in struct:qt2160_led 86 static void qt2160_led_work(struct work_struct *work) qt2160_led_work() argument 88 struct qt2160_led *led = container_of(work, struct qt2160_led, work); qt2160_led_work() 125 schedule_work(&led->work); qt2160_led_set() 242 static void qt2160_worker(struct work_struct *work) qt2160_worker() argument 245 container_of(work, struct qt2160_data, dwork.work); qt2160_worker() 308 INIT_WORK(&led->work, qt2160_led_work); qt2160_register_leds() 329 cancel_work_sync(&qt2160->leds[i].work); qt2160_unregister_leds()
|
/linux-4.1.27/drivers/firewire/ |
H A D | core-device.c | 783 queue_delayed_work(fw_workqueue, &device->work, delay); fw_schedule_device_work() 802 static void fw_device_shutdown(struct work_struct *work) fw_device_shutdown() argument 805 container_of(work, struct fw_device, work.work); fw_device_shutdown() 840 * bus manager work looks at this node. fw_device_release() 875 static void fw_device_update(struct work_struct *work) fw_device_update() argument 878 container_of(work, struct fw_device, work.work); fw_device_update() 997 static void fw_device_init(struct work_struct *work) fw_device_init() argument 1000 container_of(work, struct fw_device, work.work); fw_device_init() 1071 * out from under us while we did the initialization work, we fw_device_init() 1096 * Reschedule the IRM work if we just finished reading the fw_device_init() 1098 * just end up running the IRM work a couple of extra times - fw_device_init() 1142 static void fw_device_refresh(struct work_struct *work) fw_device_refresh() argument 1145 container_of(work, struct fw_device, work.work); fw_device_refresh() 1160 fw_device_update(work); fw_device_refresh() 1209 static void fw_device_workfn(struct work_struct *work) fw_device_workfn() argument 1211 struct fw_device *device = container_of(to_delayed_work(work), fw_device_workfn() 1212 struct fw_device, work); fw_device_workfn() 1213 device->workfn(work); fw_device_workfn() 1240 * schedule work until then, but only while holding fw_node_event() 1266 INIT_DELAYED_WORK(&device->work, fw_device_workfn); fw_node_event() 1313 * initialized we can reuse device->work to schedule a fw_node_event() 1314 * full fw_device_shutdown(). If not, there's work fw_node_event()
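
fw_device_workfn() above is a trampoline: the delayed work is always initialized to it, and it forwards to device->workfn, so the logical handler (init, update, refresh, shutdown) can be retargeted without re-initializing the work item. The indirection in outline (names illustrative):

#include <linux/workqueue.h>

struct my_device {
	struct delayed_work work;
	void (*workfn)(struct work_struct *work);
};

static void my_device_workfn(struct work_struct *work)
{
	struct my_device *dev = container_of(to_delayed_work(work),
					     struct my_device, work);

	dev->workfn(work);	/* forward to the current phase's handler */
}

static void my_device_retarget(struct my_device *dev,
			       void (*fn)(struct work_struct *),
			       unsigned long delay)
{
	dev->workfn = fn;	/* pick the next phase */
	schedule_delayed_work(&dev->work, delay);
}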
|
/linux-4.1.27/include/scsi/ |
H A D | libsas.h | 231 struct work_struct work; member in struct:sas_work 236 INIT_WORK(&sw->work, fn); INIT_SAS_WORK() 241 struct sas_work work; member in struct:sas_discovery_event 245 static inline struct sas_discovery_event *to_sas_discovery_event(struct work_struct *work) to_sas_discovery_event() argument 247 struct sas_discovery_event *ev = container_of(work, typeof(*ev), work.work); to_sas_discovery_event() 274 struct sas_work work; member in struct:asd_sas_port 301 struct sas_work work; member in struct:asd_sas_event 305 static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work) to_asd_sas_event() argument 307 struct asd_sas_event *ev = container_of(work, typeof(*ev), work.work); to_asd_sas_event() 366 struct sas_work work; member in struct:sas_ha_event 370 static inline struct sas_ha_event *to_sas_ha_event(struct work_struct *work) to_sas_ha_event() argument 372 struct sas_ha_event *ev = container_of(work, typeof(*ev), work.work); to_sas_ha_event() 389 struct list_head defer_q; /* work queued while draining */
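
libsas wraps the raw work_struct in sas_work and gives each event type a one-line container_of accessor (to_sas_discovery_event() and friends above). The typed-wrapper idiom reduced to essentials (struct my_event is illustrative):

#include <linux/workqueue.h>

struct my_work {
	struct work_struct work;
};

struct my_event {
	struct my_work work;
	int event_code;
};

static inline struct my_event *to_my_event(struct work_struct *work)
{
	return container_of(work, struct my_event, work.work);
}

static void my_event_handler(struct work_struct *work)
{
	struct my_event *ev = to_my_event(work);

	/* dispatch on ev->event_code */
	(void)ev;
}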
|
/linux-4.1.27/drivers/infiniband/hw/mlx4/ |
H A D | mcg.c | 104 struct work_struct work; member in struct:mcast_group 122 /* delayed work to clean pending SM request */ 432 * timeout work is canceled sync */ release_group() 535 static void mlx4_ib_mcg_timeout_handler(struct work_struct *work) mlx4_ib_mcg_timeout_handler() argument 537 struct delayed_work *delay = to_delayed_work(work); mlx4_ib_mcg_timeout_handler() 574 if (!queue_work(group->demux->mcg_wq, &group->work)) mlx4_ib_mcg_timeout_handler() 638 static void mlx4_ib_mcg_work_handler(struct work_struct *work) mlx4_ib_mcg_work_handler() argument 644 int rc = 1; /* release_count - this is for the scheduled work */ mlx4_ib_mcg_work_handler() 648 group = container_of(work, typeof(*group), work); mlx4_ib_mcg_work_handler() 837 INIT_WORK(&group->work, mlx4_ib_mcg_work_handler); acquire_group() 874 atomic_inc(&group->refcount); /* for scheduling the work */ queue_req() 878 if (!queue_work(group->demux->mcg_wq, &group->work)) queue_req() 914 if (!queue_work(ctx->mcg_wq, &group->work)) mlx4_ib_mcg_demux_handler() 1112 struct work_struct work; member in struct:clean_work 1117 static void mcg_clean_task(struct work_struct *work) mcg_clean_task() argument 1119 struct clean_work *cw = container_of(work, struct clean_work, work); mcg_clean_task() 1128 struct clean_work *work; mlx4_ib_mcg_port_cleanup() local 1141 work = kmalloc(sizeof *work, GFP_KERNEL); mlx4_ib_mcg_port_cleanup() 1142 if (!work) { mlx4_ib_mcg_port_cleanup() 1144 mcg_warn("failed allocating work for cleanup\n"); mlx4_ib_mcg_port_cleanup() 1148 work->ctx = ctx; mlx4_ib_mcg_port_cleanup() 1149 work->destroy_wq = destroy_wq; mlx4_ib_mcg_port_cleanup() 1150 INIT_WORK(&work->work, mcg_clean_task); mlx4_ib_mcg_port_cleanup() 1151 queue_work(clean_wq, &work->work); mlx4_ib_mcg_port_cleanup()
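
queue_req() above takes a group reference before queueing, and the repeated !queue_work() checks exist because queue_work() returns false when the item is already pending, in which case the reference taken for the scheduled run must be dropped again. The refcount discipline in outline (types illustrative):

#include <linux/atomic.h>
#include <linux/workqueue.h>

struct my_group {
	atomic_t refcount;
	struct work_struct work;
};

static void my_group_queue(struct my_group *grp, struct workqueue_struct *wq)
{
	atomic_inc(&grp->refcount);		/* reference for the scheduled work */
	if (!queue_work(wq, &grp->work))
		atomic_dec(&grp->refcount);	/* already pending: drop ours */
}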
|