#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: send the notification if true
 *
 * Queue @work for task_work_run() below and notify the @task if @notify.
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task returns from kernel
 * mode or exits.
 *
 * This is like the signal handler which runs in kernel mode, but it doesn't
 * try to wake up the @task.
 *
 * RETURNS:
 * 0 on success, or -ESRCH if the @task is exiting/exited.
 */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;

	/* lockless push onto the ->task_works stack, retry if we race */
	do {
		head = ACCESS_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}

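/*
 * Example (illustrative sketch, not used by this file): a caller that
 * wants @task to run a callback on its next return to user mode can
 * embed a callback_head in its own object and queue it as below.
 * struct my_work, my_work_fn() and my_queue_work() are hypothetical
 * names, not part of the task_work API.
 */
struct my_work {
	struct callback_head cb;
	int payload;
};

static void my_work_fn(struct callback_head *cb)
{
	struct my_work *mw = container_of(cb, struct my_work, cb);

	/* Runs in the context of the target task and may sleep. */
	pr_debug("my_work: payload=%d\n", mw->payload);
}

static int __maybe_unused my_queue_work(struct task_struct *task,
					struct my_work *mw)
{
	init_task_work(&mw->cb, my_work_fn);
	/* notify == true asks the task to run task_work_run() soon */
	return task_work_add(task, &mw->cb, true);
}
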
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, in which case we will find
	 * it again, or we raced with task_work_run() and *pprev is
	 * NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		smp_read_barrier_depends();
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

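/*
 * Example (illustrative sketch, continuing the hypothetical my_work
 * above): before freeing the object, try to pull its callback back off
 * the list.  A NULL return means my_work_fn() already ran or is about
 * to run, so the object must not be freed from here.  This assumes at
 * most one my_work is ever queued per task.
 */
static void __maybe_unused my_cancel_work(struct task_struct *task,
					  struct my_work *mw)
{
	struct callback_head *cb = task_work_cancel(task, my_work_fn);

	if (cb) {
		/* Still pending; it has been dequeued and will not run. */
		WARN_ON(cb != &mw->cb);
	}
}
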
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer queue
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			work = ACCESS_ONCE(task->task_works);
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can't remove
		 * the first entry == work, cmpxchg(task_works) should
		 * fail, but it can play with *work and other entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);
		smp_mb();

		/* Reverse the list to run the works in FIFO order */
		head = NULL;
		do {
			next = work->next;
			work->next = head;
			head = work;
			work = next;
		} while (work);

		work = head;
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
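
/*
 * Sketch (not part of this file) of how the core kernel is expected to
 * drive task_work_run(): once from the return-to-user path when
 * TIF_NOTIFY_RESUME is set, and once from the exit path after the task
 * has marked itself PF_EXITING, so the list can be sealed with
 * &work_exited.  arch_resume_user_mode() is a hypothetical stand-in
 * for an architecture's do_notify_resume().
 */
static void __maybe_unused arch_resume_user_mode(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		/* tracehook_notify_resume() runs any pending task works */
		tracehook_notify_resume(regs);
	}
}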