This source file includes the following definitions:
- set_kthread_struct
- to_kthread
- free_kthread_struct
- kthread_should_stop
- __kthread_should_park
- kthread_should_park
- kthread_freezable_should_stop
- kthread_data
- kthread_probe_data
- __kthread_parkme
- kthread_parkme
- kthread
- tsk_fork_get_node
- create_kthread
- __kthread_create_on_node
- kthread_create_on_node
- __kthread_bind_mask
- __kthread_bind
- kthread_bind_mask
- kthread_bind
- kthread_create_on_cpu
- kthread_unpark
- kthread_park
- kthread_stop
- kthreadd
- __kthread_init_worker
- kthread_worker_fn
- __kthread_create_worker
- kthread_create_worker
- kthread_create_worker_on_cpu
- queuing_blocked
- kthread_insert_work_sanity_check
- kthread_insert_work
- kthread_queue_work
- kthread_delayed_work_timer_fn
- __kthread_queue_delayed_work
- kthread_queue_delayed_work
- kthread_flush_work_fn
- kthread_flush_work
- __kthread_cancel_work
- kthread_mod_delayed_work
- __kthread_cancel_work_sync
- kthread_cancel_work_sync
- kthread_cancel_delayed_work_sync
- kthread_flush_worker
- kthread_destroy_worker
- kthread_associate_blkcg
- kthread_blkcg
// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
#ifdef CONFIG_BLK_CGROUP
	struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
	/*
	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the
	 * fact that the caller can't exec, so PF_KTHREAD can't be removed.
	 */
	current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
	WARN_ON(!(k->flags & PF_KTHREAD));
	return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
	struct kthread *kthread;

	/*
	 * Can be NULL if this kthread was created by kernel_thread()
	 * or if kmalloc() in kthread() failed.
	 */
	kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
	WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
	kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

bool __kthread_should_park(struct task_struct *k)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the setup and call
 * kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
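
/*
 * A minimal sketch of the intended calling pattern; my_freezable_fn() and
 * do_work() are hypothetical names, not part of this file:
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		set_freezable();	(kthreads are PF_NOFREEZE by default)
 *		while (!kthread_freezable_should_stop(NULL))
 *			do_work(data);
 *		return 0;
 *	}
 */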

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * @task to be a kthread, not just that it is a task with kthread data.
 */
void *kthread_probe_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	for (;;) {
		/*
		 * TASK_PARKED is a special state; we must serialize against
		 * possible pending wakeups to avoid store-store collisions
		 * on task->state.
		 *
		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
		 * wait_for_completion() in kthread_park().
		 */
		set_special_state(TASK_PARKED);
		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
			break;

		complete(&self->parked);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread *self;
	int ret;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	set_kthread_struct(self);

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}

	if (!self) {
		create->result = ERR_PTR(-ENOMEM);
		complete(done);
		do_exit(-ENOMEM);
	}

	self->data = data;
	init_completion(&self->exited);
	init_completion(&self->parked);
	current->vfork_done = &self->exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
		cgroup_kthread_ready();
		__kthread_parkme(self);
		ret = threadfn(data);
	}
	do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
					     void *data, int node,
					     const char namefmt[],
					     va_list args)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		char name[TASK_COMM_LEN];

		/*
		 * task is already visible to other tasks, so updating
		 * COMM must be protected.
		 */
		vsnprintf(name, sizeof(name), namefmt, args);
		set_task_comm(task, name);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	struct task_struct *task;
	va_list args;

	va_start(args, namefmt);
	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
	va_end(args);

	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
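
/*
 * A minimal usage sketch; my_threadfn, my_data, task and ret are
 * hypothetical names, not defined in this file. The thread function uses
 * the canonical sleep loop that re-checks kthread_should_stop() after
 * setting the task state, so a concurrent kthread_stop() cannot be missed:
 *
 *	static int my_threadfn(void *data)
 *	{
 *		for (;;) {
 *			set_current_state(TASK_INTERRUPTIBLE);
 *			if (kthread_should_stop())
 *				break;
 *			schedule();
 *		}
 *		__set_current_state(TASK_RUNNING);
 *		return 0;
 *	}
 *
 *	task = kthread_create_on_node(my_threadfn, &my_data, NUMA_NO_NODE,
 *				      "my_thread");
 *	if (!IS_ERR(task))
 *		wake_up_process(task);
 *	...
 *	ret = kthread_stop(task);	(returns my_threadfn()'s result)
 */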

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return.  If the thread is marked percpu then it is
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	/*
	 * Newly created kthread was parked when the CPU was offline.
	 * The binding was lost and we need to set it again.
	 */
	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_kthread(k);

	if (WARN_ON(k->flags & PF_EXITING))
		return -ENOSYS;

	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
		return -EBUSY;

	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
		/*
		 * Wait for __kthread_parkme() to complete(), this means we
		 * _will_ have TASK_PARKED and are about to call schedule().
		 */
		wait_for_completion(&kthread->parked);
		/*
		 * Now wait for that schedule() to complete and the task to
		 * get scheduled out.
		 */
		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
	}

	return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
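
/*
 * Sketch of the parking handshake; my_percpu_fn() and do_unit_of_work()
 * are hypothetical. The kthread side polls kthread_should_park() and
 * parks itself; the controlling side parks and later unparks it:
 *
 *	static int my_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			do_unit_of_work(data);
 *		}
 *		return 0;
 *	}
 *
 *	kthread_park(task);	(thread now sleeps in TASK_PARKED)
 *	...
 *	kthread_unpark(task);	(thread resumes in my_percpu_fn())
 */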

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
	cgroup_init_kthreadd();

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	memset(worker, 0, sizeof(struct kthread_worker));
	raw_spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to keep any locks, disable preemption or interrupts
 * when they finish. There is defined a safe point for freezing when one work
 * finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	/*
	 * FIXME: Update the check and remove the assignment when all kthread
	 * worker users are created using kthread_create_worker*() functions.
	 */
	WARN_ON(worker->task && worker->task != current);
	worker->task = current;

	if (worker->flags & KTW_FREEZABLE)
		set_freezable();

repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		raw_spin_lock_irq(&worker->lock);
		worker->task = NULL;
		raw_spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	raw_spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	cond_resched();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
			const char namefmt[], va_list args)
{
	struct kthread_worker *worker;
	struct task_struct *task;
	int node = NUMA_NO_NODE;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (!worker)
		return ERR_PTR(-ENOMEM);

	kthread_init_worker(worker);

	if (cpu >= 0)
		node = cpu_to_node(cpu);

	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
		goto fail_task;

	if (cpu >= 0)
		kthread_bind(task, cpu);

	worker->flags = flags;
	worker->task = task;
	wake_up_process(task);
	return worker;

fail_task:
	kfree(worker);
	return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(-1, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker);
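
/*
 * Typical worker lifecycle, as a sketch (my_work_fn, my_work and worker
 * are hypothetical names):
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		... process one unit of work ...
 *	}
 *
 *	worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *
 *	kthread_init_work(&my_work, my_work_fn);
 *	kthread_queue_work(worker, &my_work);
 *	...
 *	kthread_flush_work(&my_work);
 *	kthread_destroy_worker(worker);
 */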

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *	to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...)
{
	struct kthread_worker *worker;
	va_list args;

	va_start(args, namefmt);
	worker = __kthread_create_worker(cpu, flags, namefmt, args);
	va_end(args);

	return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
				   struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
					     struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);
	WARN_ON_ONCE(!list_empty(&work->node));
	/* Do not use a work with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	kthread_insert_work_sanity_check(worker, work);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_worker_create().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);
	if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *	delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_work *work = &dwork->work;
	struct kthread_worker *worker = work->worker;
	unsigned long flags;

	/*
	 * This might happen when a pending work is reinitialized.
	 * It means that it is used a wrong way.
	 */
	if (WARN_ON_ONCE(!worker))
		return;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	/* Move the work from worker->delayed_work_list. */
	WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	kthread_insert_work(worker, work, &worker->work_list);

	raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
					 struct kthread_delayed_work *dwork,
					 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct kthread_work *work = &dwork->work;

	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

	/*
	 * If @delay is 0, queue @dwork->work immediately.  This is for
	 * both optimization and correctness.  The earliest @timer can
	 * expire is on the closest next tick and delayed_work users depend
	 * on that there's no such delay when @delay is 0.
	 */
	if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
		return;
	}

	/* Be paranoid and try to detect possible races already now. */
	kthread_insert_work_sanity_check(worker, work);

	list_add(&work->node, &worker->delayed_work_list);
	work->worker = worker;
	timer->expires = jiffies + delay;
	add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *	after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	bool ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	if (!queuing_blocked(worker, work)) {
		__kthread_queue_delayed_work(worker, dwork, delay);
		ret = true;
	}

	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
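
/*
 * Delayed queuing, as a sketch (my_dwork, my_work_fn and worker are
 * hypothetical; the work runs on the worker roughly 100ms after queuing):
 *
 *	static struct kthread_delayed_work my_dwork;
 *
 *	kthread_init_delayed_work(&my_dwork, my_work_fn);
 *	kthread_queue_delayed_work(worker, &my_dwork,
 *				   msecs_to_jiffies(100));
 *	...
 *	kthread_cancel_delayed_work_sync(&my_dwork);
 */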

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

	worker = work->worker;
	if (!worker)
		return;

	raw_spin_lock_irq(&worker->lock);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
	else
		noop = true;

	raw_spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *	%false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
				  unsigned long *flags)
{
	/* Try to cancel the timer if exists. */
	if (is_dwork) {
		struct kthread_delayed_work *dwork =
			container_of(work, struct kthread_delayed_work, work);
		struct kthread_worker *worker = work->worker;

		/*
		 * del_timer_sync() must be called to make sure that the timer
		 * callback is not running.  The lock must be temporarily
		 * released to avoid racing with the callback.  It is only
		 * safe to use the work after canceling when the timer
		 * callback is guaranteed not to be running.
		 */
		work->canceling++;
		raw_spin_unlock_irqrestore(&worker->lock, *flags);
		del_timer_sync(&dwork->timer);
		raw_spin_lock_irqsave(&worker->lock, *flags);
		work->canceling--;
	}

	/*
	 * Try to remove the work from a worker list. It might either
	 * be from worker->work_list or from worker->delayed_work_list.
	 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		return true;
	}

	return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn() for
 * details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay)
{
	struct kthread_work *work = &dwork->work;
	unsigned long flags;
	int ret = false;

	raw_spin_lock_irqsave(&worker->lock, flags);

	/* Do not bother with canceling when never queued. */
	if (!work->worker)
		goto fast_queue;

	/* Work must not be used with >1 worker, see kthread_queue_work() */
	WARN_ON_ONCE(work->worker != worker);

	/* Do not fight with another command that is canceling this work. */
	if (work->canceling)
		goto out;

	ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
	__kthread_queue_delayed_work(worker, dwork, delay);
out:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
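
/*
 * Sketch: re-arming a timeout on every event (worker and my_dwork are
 * hypothetical). Unlike kthread_queue_delayed_work(), a pending timer is
 * replaced rather than left alone, so the work runs 500ms after the last
 * event rather than the first:
 *
 *	kthread_mod_delayed_work(worker, &my_dwork, msecs_to_jiffies(500));
 */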

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
	struct kthread_worker *worker = work->worker;
	unsigned long flags;
	int ret = false;

	if (!worker)
		goto out;

	raw_spin_lock_irqsave(&worker->lock, flags);
	/* Work must not be used with >1 worker, see kthread_queue_work(). */
	WARN_ON_ONCE(work->worker != worker);

	ret = __kthread_cancel_work(work, is_dwork, &flags);

	if (worker->current_work != work)
		goto out_fast;

	/*
	 * The work is in progress and we need to wait with the lock released.
	 * In the meantime, block any queuing by setting the canceling counter.
	 */
	work->canceling++;
	raw_spin_unlock_irqrestore(&worker->lock, flags);
	kthread_flush_work(work);
	raw_spin_lock_irqsave(&worker->lock, flags);
	work->canceling--;

out_fast:
	raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
	return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
	return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *	wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
	return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	kthread_queue_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This function
 * stores the original thread's cgroup info in the current kthread context for
 * later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
	struct kthread *kthread;

	if (!(current->flags & PF_KTHREAD))
		return;
	kthread = to_kthread(current);
	if (!kthread)
		return;

	if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
		kthread->blkcg_css = NULL;
	}
	if (css) {
		css_get(css);
		kthread->blkcg_css = css;
	}
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
	struct kthread *kthread;

	if (current->flags & PF_KTHREAD) {
		kthread = to_kthread(current);
		if (kthread)
			return kthread->blkcg_css;
	}
	return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif