Searched refs:sched (Results 1 – 135 of 135) sorted by relevance

/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/
Dworkitem.c90 cfs_wi_sched_lock(cfs_wi_sched_t *sched) in cfs_wi_sched_lock() argument
92 spin_lock(&sched->ws_lock); in cfs_wi_sched_lock()
96 cfs_wi_sched_unlock(cfs_wi_sched_t *sched) in cfs_wi_sched_unlock() argument
98 spin_unlock(&sched->ws_lock); in cfs_wi_sched_unlock()
102 cfs_wi_sched_cansleep(cfs_wi_sched_t *sched) in cfs_wi_sched_cansleep() argument
104 cfs_wi_sched_lock(sched); in cfs_wi_sched_cansleep()
105 if (sched->ws_stopping) { in cfs_wi_sched_cansleep()
106 cfs_wi_sched_unlock(sched); in cfs_wi_sched_cansleep()
110 if (!list_empty(&sched->ws_runq)) { in cfs_wi_sched_cansleep()
111 cfs_wi_sched_unlock(sched); in cfs_wi_sched_cansleep()
[all …]
/linux-4.1.27/tools/perf/
Dbuiltin-sched.c105 int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
108 int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
111 int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
115 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
118 int (*migrate_task_event)(struct perf_sched *sched,
185 static void burn_nsecs(struct perf_sched *sched, u64 nsecs) in burn_nsecs() argument
191 } while (T1 + sched->run_measurement_overhead < T0 + nsecs); in burn_nsecs()
204 static void calibrate_run_measurement_overhead(struct perf_sched *sched) in calibrate_run_measurement_overhead() argument
211 burn_nsecs(sched, 0); in calibrate_run_measurement_overhead()
216 sched->run_measurement_overhead = min_delta; in calibrate_run_measurement_overhead()
[all …]
Dcommand-list.txt22 perf-sched mainporcelain common
DBuild6 perf-y += builtin-sched.o
Dperf-completion.sh168 if [[ $prev_skip_opts == @(kvm|kmem|mem|lock|sched|
Dbuiltin-trace.c1250 bool sched; member
2189 if (trace->sched && in trace__run()
2693 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), in cmd_trace()
/linux-4.1.27/net/netfilter/ipvs/
Dip_vs_sched.c66 struct ip_vs_scheduler *sched) in ip_vs_unbind_scheduler() argument
75 if (sched->done_service) in ip_vs_unbind_scheduler()
76 sched->done_service(svc); in ip_vs_unbind_scheduler()
86 struct ip_vs_scheduler *sched; in ip_vs_sched_getbyname() local
92 list_for_each_entry(sched, &ip_vs_schedulers, n_list) { in ip_vs_sched_getbyname()
96 if (sched->module && !try_module_get(sched->module)) { in ip_vs_sched_getbyname()
102 if (strcmp(sched_name, sched->name)==0) { in ip_vs_sched_getbyname()
105 return sched; in ip_vs_sched_getbyname()
107 module_put(sched->module); in ip_vs_sched_getbyname()
120 struct ip_vs_scheduler *sched; in ip_vs_scheduler_get() local
[all …]
Dip_vs_ctl.c793 struct ip_vs_scheduler *sched; in __ip_vs_update_dest() local
849 sched = rcu_dereference_protected(svc->scheduler, 1); in __ip_vs_update_dest()
850 if (sched && sched->add_dest) in __ip_vs_update_dest()
851 sched->add_dest(svc, dest); in __ip_vs_update_dest()
853 sched = rcu_dereference_protected(svc->scheduler, 1); in __ip_vs_update_dest()
854 if (sched && sched->upd_dest) in __ip_vs_update_dest()
855 sched->upd_dest(svc, dest); in __ip_vs_update_dest()
1085 struct ip_vs_scheduler *sched; in __ip_vs_unlink_dest() local
1087 sched = rcu_dereference_protected(svc->scheduler, 1); in __ip_vs_unlink_dest()
1088 if (sched && sched->del_dest) in __ip_vs_unlink_dest()
[all …]
Dip_vs_core.c314 struct ip_vs_scheduler *sched; in ip_vs_sched_persist() local
321 sched = rcu_dereference(svc->scheduler); in ip_vs_sched_persist()
322 if (sched) { in ip_vs_sched_persist()
325 dest = sched->schedule(svc, skb, iph); in ip_vs_sched_persist()
419 struct ip_vs_scheduler *sched; in ip_vs_schedule() local
475 sched = rcu_dereference(svc->scheduler); in ip_vs_schedule()
476 if (sched) { in ip_vs_schedule()
479 dest = sched->schedule(svc, skb, iph); in ip_vs_schedule()
/linux-4.1.27/crypto/
Dfcrypt.c54 __be32 sched[ROUNDS]; member
226 #define F_ENCRYPT(R, L, sched) \ argument
229 u.l = sched ^ R; \
245 F_ENCRYPT(X.r, X.l, ctx->sched[0x0]); in fcrypt_encrypt()
246 F_ENCRYPT(X.l, X.r, ctx->sched[0x1]); in fcrypt_encrypt()
247 F_ENCRYPT(X.r, X.l, ctx->sched[0x2]); in fcrypt_encrypt()
248 F_ENCRYPT(X.l, X.r, ctx->sched[0x3]); in fcrypt_encrypt()
249 F_ENCRYPT(X.r, X.l, ctx->sched[0x4]); in fcrypt_encrypt()
250 F_ENCRYPT(X.l, X.r, ctx->sched[0x5]); in fcrypt_encrypt()
251 F_ENCRYPT(X.r, X.l, ctx->sched[0x6]); in fcrypt_encrypt()
[all …]
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
Dchannel.c243 if (likely(sc->sched.channel_switch_time)) in ath_chanctx_check_active()
245 usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
293 ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
363 mod_timer(&sc->sched.timer, jiffies + tsf_time); in ath_chanctx_setup_timer()
379 if (ctx->active && sc->sched.extend_absence) { in ath_chanctx_handle_bmiss()
381 sc->sched.extend_absence = false; in ath_chanctx_handle_bmiss()
388 if (ctx->active && sc->sched.beacon_miss >= 2) { in ath_chanctx_handle_bmiss()
390 sc->sched.extend_absence = true; in ath_chanctx_handle_bmiss()
403 avp->offchannel_duration = sc->sched.offchannel_duration; in ath_chanctx_offchannel_noa()
431 if (sc->sched.extend_absence) in ath_chanctx_set_periodic_noa()
[all …]
Dxmit.c117 if (tid->sched) in ath_tx_queue_tid()
120 tid->sched = true; in ath_tx_queue_tid()
123 if (ac->sched) in ath_tx_queue_tid()
126 ac->sched = true; in ath_tx_queue_tid()
1554 if (!tid->sched) { in ath_tx_aggr_sleep()
1561 tid->sched = false; in ath_tx_aggr_sleep()
1564 if (ac->sched) { in ath_tx_aggr_sleep()
1565 ac->sched = false; in ath_tx_aggr_sleep()
1949 ac->sched = false; in ath_txq_schedule()
1956 tid->sched = false; in ath_txq_schedule()
[all …]
Ddebug_sta.c60 acno, ac->sched); in read_file_node_aggr()
83 tid->sched); in read_file_node_aggr()
Dath9k.h180 bool sched; member
255 bool sched; member
981 struct ath_chanctx_sched sched; member
Dmain.c509 bool sched = false; in ath_isr() local
547 sched = true; in ath_isr()
588 if (sched) { in ath_isr()
2536 sc->sched.mgd_prepare_tx = true; in ath9k_mgd_prepare_tx()
2552 sc->sched.mgd_prepare_tx = false; in ath9k_mgd_prepare_tx()
2565 sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE; in ath9k_mgd_prepare_tx()
/linux-4.1.27/Documentation/scheduler/
D00-INDEX3 sched-arch.txt
5 sched-bwc.txt
7 sched-design-CFS.txt
9 sched-domains.txt
11 sched-nice-design.txt
13 sched-rt-group.txt
15 sched-deadline.txt
17 sched-stats.txt
Dsched-domains.txt12 explicitly set. A sched domain's span means "balance process load among these
23 Balancing within a sched domain occurs between groups. That is, each group
28 In kernel/sched/core.c, trigger_load_balance() is run periodically on each CPU
35 at the time the scheduler_tick() happened and iterates over all sched domains
42 Initially, load_balance() finds the busiest group in the current sched domain.
47 computed while iterating over this sched domain's groups.
49 *** Implementing sched domains ***
60 The implementor should read comments in include/linux/sched.h:
65 while using the generic domain builder in kernel/sched/core.c if they wish to
74 The sched-domains debugging infrastructure can be enabled by enabling
[all …]
Dsched-design-CFS.txt131 SCHED_FIFO/_RR are implemented in sched/rt.c and are as specified by
146 sched/fair.c implements the CFS scheduler described above.
148 sched/rt.c implements SCHED_FIFO and SCHED_RR semantics, in a simpler way than
Dsched-bwc.txt5 The SCHED_RT case is covered in Documentation/scheduler/sched-rt-group.txt ]
Dcompletion.txt28 kernel/sched/completion.c - for details on completion design and
Dsched-deadline.txt230 Documentation/scheduler/sched-rt-group.txt), and is based on readable/
/linux-4.1.27/tools/perf/Documentation/
Dperf-sched.txt1 perf-sched(1)
6 perf-sched - Tool to trace/measure scheduler properties (latencies)
11 'perf sched' {record|latency|map|replay|script}
15 There are five variants of perf sched:
17 'perf sched record <command>' to record the scheduling events
20 'perf sched latency' to report the per task scheduling latencies
23 'perf sched script' to see a detailed trace of the workload that
26 'perf sched replay' to simulate the workload that was recorded
27 via perf sched record. (this is done by starting up mockup threads
33 'perf sched map' to print a textual context-switching outline of
[all …]
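(The five variants quoted above chain into a record-then-analyze workflow. A minimal usage sketch, hedged: the subcommands come straight from the synopsis above, while "sleep 1" is only a placeholder workload.)

    % perf sched record sleep 1     # record scheduling events into perf.data
    % perf sched latency            # report per-task scheduling latencies
    % perf sched map                # print a textual context-switching outline
    % perf sched script             # dump a detailed trace of the recorded workload
    % perf sched replay             # re-simulate the workload with mockup threads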
Dperf-bench.txt31 % perf bench sched pipe # with no style specified
42 % perf bench --format=simple sched pipe # specified simple
49 'sched'::
64 SUITES FOR 'sched'
92 % perf bench sched messaging # run with default
98 % perf bench sched messaging -t -g 20 # be multi-thread, with 20 groups
119 % perf bench sched pipe
126 % perf bench sched pipe -l 1000 # loop 1000
Dperf-inject.txt39 --sched-stat::
Dperf-script-perl.txt52 # perf record -a -e sched:sched_wakeup
58 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
77 sub sched::sched_wakeup
Dperf-trace.txt85 --sched:
Dperf-list.txt120 'subsys_glob:event_glob' to filter by tracepoint subsystems such as sched,
Dperf-script-python.txt450 # perf record -a -e sched:sched_wakeup
456 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
/linux-4.1.27/tools/perf/scripts/python/bin/
Dsched-migration-record2 perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched
Dsched-migration-report3 perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/sched-migration.py
/linux-4.1.27/arch/x86/kernel/cpu/
Dperf_event.c662 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints, in perf_sched_init() argument
667 memset(sched, 0, sizeof(*sched)); in perf_sched_init()
668 sched->max_events = num; in perf_sched_init()
669 sched->max_weight = wmax; in perf_sched_init()
670 sched->max_gp = gpmax; in perf_sched_init()
671 sched->constraints = constraints; in perf_sched_init()
678 sched->state.event = idx; /* start with min weight */ in perf_sched_init()
679 sched->state.weight = wmin; in perf_sched_init()
680 sched->state.unassigned = num; in perf_sched_init()
683 static void perf_sched_save_state(struct perf_sched *sched) in perf_sched_save_state() argument
[all …]
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/socklnd/
Dsocklnd_cb.c687 ksock_sched_t *sched = conn->ksnc_scheduler; in ksocknal_queue_tx_locked() local
727 spin_lock_bh(&sched->kss_lock); in ksocknal_queue_tx_locked()
760 list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs); in ksocknal_queue_tx_locked()
768 &sched->kss_tx_conns); in ksocknal_queue_tx_locked()
770 wake_up (&sched->kss_waitq); in ksocknal_queue_tx_locked()
773 spin_unlock_bh(&sched->kss_lock); in ksocknal_queue_tx_locked()
1318 ksock_sched_t *sched = conn->ksnc_scheduler; in ksocknal_recv() local
1349 spin_lock_bh(&sched->kss_lock); in ksocknal_recv()
1353 list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns); in ksocknal_recv()
1354 wake_up (&sched->kss_waitq); in ksocknal_recv()
[all …]
Dsocklnd.c664 ksock_sched_t *sched; in ksocknal_choose_scheduler_locked() local
669 sched = &info->ksi_scheds[0]; in ksocknal_choose_scheduler_locked()
676 if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns) in ksocknal_choose_scheduler_locked()
677 sched = &info->ksi_scheds[i]; in ksocknal_choose_scheduler_locked()
680 return sched; in ksocknal_choose_scheduler_locked()
1020 ksock_sched_t *sched; in ksocknal_create_conn() local
1253 sched = ksocknal_choose_scheduler_locked(cpt); in ksocknal_create_conn()
1254 sched->kss_nconns++; in ksocknal_create_conn()
1255 conn->ksnc_scheduler = sched; in ksocknal_create_conn()
1293 (int)(sched - &sched->kss_info->ksi_scheds[0])); in ksocknal_create_conn()
[all …]
Dsocklnd_lib-linux.c693 ksock_sched_t *sched; in ksocknal_lib_memory_pressure() local
695 sched = conn->ksnc_scheduler; in ksocknal_lib_memory_pressure()
696 spin_lock_bh(&sched->kss_lock); in ksocknal_lib_memory_pressure()
711 spin_unlock_bh(&sched->kss_lock); in ksocknal_lib_memory_pressure()
Dsocklnd_proto.c373 ksock_sched_t *sched = conn->ksnc_scheduler; in ksocknal_handle_zcreq() local
377 spin_lock_bh(&sched->kss_lock); in ksocknal_handle_zcreq()
381 spin_unlock_bh(&sched->kss_lock); in ksocknal_handle_zcreq()
/linux-4.1.27/tools/testing/selftests/ftrace/test.d/event/
Dsubsystem-enable.tc19 if [ ! -f set_event -o ! -d events/sched ]; then
38 echo 1 > events/sched/enable
49 echo 0 > events/sched/enable
Devent-enable.tc19 if [ ! -f set_event -o ! -d events/sched ]; then
38 echo 1 > events/sched/sched_switch/enable
49 echo 0 > events/sched/sched_switch/enable
/linux-4.1.27/drivers/staging/lustre/include/linux/libcfs/
Dlibcfs_workitem.h100 void cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
101 int cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
102 void cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
/linux-4.1.27/arch/x86/crypto/
Daes-i586-asm_32.S107 #define do_fcol(table, a1,a2,a3,a4, idx, tmp, sched) \ argument
108 mov 0 sched,%a1; \
110 mov 12 sched,%a2; \
112 mov 4 sched,%a4; \
120 mov 8 sched,%a3; \
126 #define do_icol(table, a1,a2,a3,a4, idx, tmp, sched) \ argument
127 mov 0 sched,%a1; \
129 mov 4 sched,%a2; \
131 mov 12 sched,%a4; \
139 mov 8 sched,%a3; \
/linux-4.1.27/tools/perf/scripts/perl/bin/
Dwakeup-latency-record2 perf record -e sched:sched_switch -e sched:sched_wakeup $@
/linux-4.1.27/arch/powerpc/kernel/
DMakefile20 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
21 CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
22 CFLAGS_REMOVE_btext.o = -pg -mno-sched-epilog
23 CFLAGS_REMOVE_prom.o = -pg -mno-sched-epilog
25 CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
27 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
/linux-4.1.27/tools/perf/bench/
DBuild1 perf-y += sched-messaging.o
2 perf-y += sched-pipe.o
/linux-4.1.27/Documentation/DocBook/
D.device-drivers.xml.cmd2 …/sched.h kernel/sched/core.c kernel/sched/cpupri.c kernel/sched/fair.c include/linux/completion.h …
D.networking.xml.cmd2sched.c net/sunrpc/socklib.c net/sunrpc/stats.c net/sunrpc/rpc_pipe.c net/sunrpc/rpcb_clnt.c net/s…
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/
Do2iblnd.c649 struct kib_sched_info *sched; in kiblnd_create_conn() local
663 sched = kiblnd_data.kib_scheds[cpt]; in kiblnd_create_conn()
665 LASSERT(sched->ibs_nthreads > 0); in kiblnd_create_conn()
774 conn->ibc_sched = sched; in kiblnd_create_conn()
802 spin_lock_irqsave(&sched->ibs_lock, flags); in kiblnd_create_conn()
804 spin_unlock_irqrestore(&sched->ibs_lock, flags); in kiblnd_create_conn()
2668 struct kib_sched_info *sched; in kiblnd_base_shutdown() local
2694 cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) in kiblnd_base_shutdown()
2695 wake_up_all(&sched->ibs_waitq); in kiblnd_base_shutdown()
2807 struct kib_sched_info *sched; in kiblnd_base_startup() local
[all …]
Do2iblnd_cb.c134 struct kib_sched_info *sched = conn->ibc_sched; in kiblnd_drop_rx() local
137 spin_lock_irqsave(&sched->ibs_lock, flags); in kiblnd_drop_rx()
140 spin_unlock_irqrestore(&sched->ibs_lock, flags); in kiblnd_drop_rx()
3277 struct kib_sched_info *sched = conn->ibc_sched; in kiblnd_cq_completion() local
3282 spin_lock_irqsave(&sched->ibs_lock, flags); in kiblnd_cq_completion()
3291 list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns); in kiblnd_cq_completion()
3293 if (waitqueue_active(&sched->ibs_waitq)) in kiblnd_cq_completion()
3294 wake_up(&sched->ibs_waitq); in kiblnd_cq_completion()
3297 spin_unlock_irqrestore(&sched->ibs_lock, flags); in kiblnd_cq_completion()
3313 struct kib_sched_info *sched; in kiblnd_scheduler() local
[all …]
/linux-4.1.27/tools/perf/scripts/perl/
Dwakeup-latency.pl28 sub sched::sched_switch subroutine
51 sub sched::sched_wakeup subroutine
/linux-4.1.27/drivers/usb/host/
Dehci-sched.c1274 struct ehci_iso_sched *sched; in itd_urb_transaction() local
1277 sched = iso_sched_alloc (urb->number_of_packets, mem_flags); in itd_urb_transaction()
1278 if (unlikely (sched == NULL)) in itd_urb_transaction()
1281 itd_sched_init(ehci, sched, stream, urb); in itd_urb_transaction()
1284 num_itds = 1 + (sched->span + 7) / 8; in itd_urb_transaction()
1310 iso_sched_free(stream, sched); in itd_urb_transaction()
1319 list_add (&itd->itd_list, &sched->td_list); in itd_urb_transaction()
1324 urb->hcpriv = sched; in itd_urb_transaction()
1412 struct ehci_iso_sched *sched, in sitd_slot_ok() argument
1505 struct ehci_iso_sched *sched = urb->hcpriv; in iso_stream_schedule() local
[all …]
DMakefile9 fhci-y += fhci-mem.o fhci-tds.o fhci-sched.o
Dfusbh200-hcd.c4213 struct fusbh200_iso_sched *sched; in itd_urb_transaction() local
4216 sched = iso_sched_alloc (urb->number_of_packets, mem_flags); in itd_urb_transaction()
4217 if (unlikely (sched == NULL)) in itd_urb_transaction()
4220 itd_sched_init(fusbh200, sched, stream, urb); in itd_urb_transaction()
4223 num_itds = 1 + (sched->span + 7) / 8; in itd_urb_transaction()
4249 iso_sched_free(stream, sched); in itd_urb_transaction()
4257 list_add (&itd->itd_list, &sched->td_list); in itd_urb_transaction()
4262 urb->hcpriv = sched; in itd_urb_transaction()
4314 struct fusbh200_iso_sched *sched = urb->hcpriv; in iso_stream_schedule() local
4317 span = sched->span; in iso_stream_schedule()
[all …]
Dfotg210-hcd.c4279 struct fotg210_iso_sched *sched; in itd_urb_transaction() local
4282 sched = iso_sched_alloc(urb->number_of_packets, mem_flags); in itd_urb_transaction()
4283 if (unlikely(sched == NULL)) in itd_urb_transaction()
4286 itd_sched_init(fotg210, sched, stream, urb); in itd_urb_transaction()
4289 num_itds = 1 + (sched->span + 7) / 8; in itd_urb_transaction()
4315 iso_sched_free(stream, sched); in itd_urb_transaction()
4323 list_add(&itd->itd_list, &sched->td_list); in itd_urb_transaction()
4328 urb->hcpriv = sched; in itd_urb_transaction()
4380 struct fotg210_iso_sched *sched = urb->hcpriv; in iso_stream_schedule() local
4383 span = sched->span; in iso_stream_schedule()
[all …]
/linux-4.1.27/Documentation/trace/
Devents.txt48 The events are organized into subsystems, such as ext4, irq, sched,
65 # echo 1 > /sys/kernel/debug/tracing/events/sched/sched_wakeup/enable
69 # echo 0 > /sys/kernel/debug/tracing/events/sched/sched_wakeup/enable
71 To enable all events in sched subsystem:
73 # echo 1 > /sys/kernel/debug/tracing/events/sched/enable
128 # cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/format
211 # cd /sys/kernel/debug/tracing/events/sched/sched_wakeup
261 Clear the filters on all events in the sched subsystem:
263 # cd /sys/kernel/debug/tracing/events/sched
270 Set a filter using only common fields for all events in the sched
[all …]
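(Condensing the events.txt snippet above into one hedged end-to-end session, assuming debugfs is mounted at /sys/kernel/debug as in the quoted commands:)

    % cd /sys/kernel/debug/tracing
    % echo 1 > events/sched/sched_wakeup/enable   # enable a single sched event
    % echo 1 > events/sched/enable                # or enable the whole sched subsystem
    % cat trace | head                            # inspect what was recorded
    % echo 0 > events/sched/enable                # switch the events back off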
Dftrace.txt2456 echo 'try_to_wake_up:enable_event:sched:sched_switch:2' > \
2467 echo '!try_to_wake_up:enable_event:sched:sched_switch:0' > \
2469 echo '!schedule:disable_event:sched:sched_switch' > \
2617 # echo 1 > events/sched/enable
2701 # echo 1 > instances/foo/events/sched/sched_wakeup/enable
2702 # echo 1 > instances/foo/events/sched/sched_wakeup_new/enable
2703 # echo 1 > instances/foo/events/sched/sched_switch/enable
/linux-4.1.27/Documentation/cgroups/
Dcpusets.txt145 - in sched.c migrate_live_tasks(), to keep migrating tasks within
376 The kernel scheduler (kernel/sched/core.c) automatically load balances
385 has support to partition the systems CPUs into a number of sched
386 domains such that it only load balances within each sched domain.
387 Each sched domain covers some subset of the CPUs in the system;
388 no two sched domains overlap; some CPUs might not be in any sched
391 Put simply, it costs less to balance between two smaller sched domains
395 By default, there is one sched domain covering all CPUs, including those
411 be contained in a single sched domain, ensuring that load balancing
421 enabled, then the scheduler will have one sched domain covering all
[all …]
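(The sched-domain partitioning described in the cpusets.txt snippet above is driven from the cpuset side. A rough sketch, assuming a cgroup-v1 cpuset hierarchy mounted at /sys/fs/cgroup/cpuset; the mount point and the "partition_a" name are illustrative only, while the cpuset.cpus and cpuset.sched_load_balance knobs follow the v1 cpuset interface:)

    % mkdir /sys/fs/cgroup/cpuset/partition_a
    % echo 0-3 > /sys/fs/cgroup/cpuset/partition_a/cpuset.cpus     # confine this cpuset to CPUs 0-3
    % echo 0 > /sys/fs/cgroup/cpuset/cpuset.sched_load_balance     # stop balancing across the full set,
                                                                   # so smaller sched domains get built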
/linux-4.1.27/tools/testing/selftests/rcutorture/configs/rcu/
DTREE05.boot1 rcutorture.torture_type=sched
DTREE08.boot1 rcutorture.torture_type=sched
/linux-4.1.27/net/ipv6/
Dip6_flowlabel.c134 unsigned long sched = 0; in ip6_fl_gc() local
156 if (!sched || time_before(ttd, sched)) in ip6_fl_gc()
157 sched = ttd; in ip6_fl_gc()
162 if (!sched && atomic_read(&fl_size)) in ip6_fl_gc()
163 sched = now + FL_MAX_LINGER; in ip6_fl_gc()
164 if (sched) { in ip6_fl_gc()
165 mod_timer(&ip6_fl_gc_timer, sched); in ip6_fl_gc()
/linux-4.1.27/tools/testing/selftests/ftrace/test.d/00basic/
Dbasic4.tc5 grep -q sched available_events && exit 0 || exit $FAIL
/linux-4.1.27/drivers/usb/c67x00/
DMakefile7 c67x00-y := c67x00-drv.o c67x00-ll-hpi.o c67x00-hcd.o c67x00-sched.o
/linux-4.1.27/arch/arm/plat-versatile/
DMakefile4 obj-$(CONFIG_PLAT_VERSATILE_SCHED_CLOCK) += sched-clock.o
/linux-4.1.27/arch/tile/kernel/
Dregs_64.S54 STD_ENTRY_SECTION(__switch_to, .sched.text)
99 STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
Dregs_32.S54 STD_ENTRY_SECTION(__switch_to, .sched.text)
99 STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
/linux-4.1.27/arch/powerpc/platforms/powermac/
DMakefile5 CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog
/linux-4.1.27/arch/arc/kernel/
Dctx_sw_asm.S21 .section .sched.text,"ax",@progbits
/linux-4.1.27/net/sunrpc/
DMakefile11 sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
Diwch_cm.c2203 static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) in sched() function
2239 [CPL_ACT_ESTABLISH] = sched,
2240 [CPL_ACT_OPEN_RPL] = sched,
2241 [CPL_RX_DATA] = sched,
2242 [CPL_TX_DMA_ACK] = sched,
2243 [CPL_ABORT_RPL_RSS] = sched,
2244 [CPL_ABORT_RPL] = sched,
2245 [CPL_PASS_OPEN_RPL] = sched,
2246 [CPL_CLOSE_LISTSRV_RPL] = sched,
2247 [CPL_PASS_ACCEPT_REQ] = sched,
[all …]
/linux-4.1.27/drivers/input/joystick/
Dsidewinder.c138 int timeout, bitout, sched, i, kick, start, strobe; in sw_read_packet() local
148 sched = 0; in sw_read_packet()
168 sched--; in sw_read_packet()
181 sched = kick; /* Schedule second trigger */ in sw_read_packet()
186 if (pending && sched < 0 && (i > -SW_END)) { /* Second trigger time */ in sw_read_packet()
/linux-4.1.27/arch/unicore32/
DMakefile30 KBUILD_CFLAGS += -mno-sched-prolog
/linux-4.1.27/Documentation/RCU/
Dlockdep.txt15 rcu_read_lock_sched_held() for RCU-sched.
31 Check for RCU-sched read-side critical section.
45 is invoked by both RCU-sched readers and updaters.
Dstallwarn.txt71 and that the stall was affecting RCU-sched. This message will normally be
73 RCU and RCU-sched are implemented by the same underlying mechanism,
183 result in RCU-sched and RCU-bh stalls.
186 result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh
190 result in RCU-sched and RCU-bh stalls.
235 The RCU, RCU-sched, RCU-bh, and RCU-tasks implementations have CPU stall
DNMI-RCU.txt120 being protected by RCU-sched.
Dtorture.txt195 "sched": preempt_disable(), preempt_enable(), and
DwhatisRCU.txt854 sched: Critical sections Grace period Barrier
908 If so, RCU-sched is the only choice that will work for you.
Dtrace.txt22 For CONFIG_TREE_RCU, the RCU flavor maps onto the RCU-sched flavor,
/linux-4.1.27/Documentation/ABI/testing/
Dsysfs-kernel-uids14 Documentation/scheduler/sched-design-CFS.txt
/linux-4.1.27/arch/xtensa/kernel/
Dvmlinux.lds.S94 *(.sched.literal .sched.text)
/linux-4.1.27/Documentation/block/
D00-INDEX25 switching-sched.txt
Ddeadline-iosched.txt10 Refer to Documentation/block/switching-sched.txt for information on
/linux-4.1.27/kernel/time/
DMakefile11 obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o tick-sched.o
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/
DMakefile4 spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb/
Dsge.c235 struct sched { struct
271 struct sched *tx_sched;
284 struct sched *s = sge->tx_sched; in tx_sched_stop()
300 struct sched *s = sge->tx_sched; in t1_sched_update_parms()
345 struct sched *s = sge->tx_sched;
360 struct sched *s = sge->tx_sched;
373 struct sched *s; in tx_sched_init()
376 s = kzalloc(sizeof (struct sched), GFP_KERNEL); in tx_sched_init()
399 struct sched *s = sge->tx_sched; in sched_update_avail()
434 struct sched *s = sge->tx_sched; in sched_skb()
/linux-4.1.27/Documentation/
Dstable_kernel_rules.txt72 Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
73 Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
74 Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
Dmagic-number.txt148 HTB_CMAGIC 0xFEFAFEF1 htb_class net/sched/sch_htb.c
Dstatic-keys.txt246 'pipe-test' (also known as 'perf bench sched pipe') can be used to show the
Dunshare.txt122 #include <sched.h>
Dkernel-parameters.txt2988 for RCU-preempt, and "s" for RCU-sched, and "N"
3204 Run the RCU sched early boot self tests
3682 frequency tracepoints such as irq or sched, can cause
/linux-4.1.27/net/rxrpc/
Dar-call.c757 bool sched; in rxrpc_mark_call_released() local
761 sched = false; in rxrpc_mark_call_released()
767 sched = true; in rxrpc_mark_call_released()
770 sched = true; in rxrpc_mark_call_released()
771 if (sched) in rxrpc_mark_call_released()
/linux-4.1.27/net/
DMakefile15 obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/
DKconfig222 source "net/sched/Kconfig"
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/
Dcxgb3_ioctl.h101 uint8_t sched; member
Dcxgb3_main.c801 char *buf, int sched) in tm_attr_show() argument
808 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; in tm_attr_show()
812 if (sched & 1) in tm_attr_show()
827 const char *buf, size_t len, int sched) in tm_attr_store() argument
843 ret = t3_config_sched(adap, val, sched); in tm_attr_store()
850 #define TM_ATTR(name, sched) \ argument
854 return tm_attr_show(d, buf, sched); \
859 return tm_attr_store(d, buf, len, sched); \
939 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, in send_pktsched_cmd() argument
955 req->sched = sched; in send_pktsched_cmd()
Dcommon.h732 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
Dt3_hw.c3002 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched) in t3_config_sched() argument
3028 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2); in t3_config_sched()
3030 if (sched & 1) in t3_config_sched()
Dt3_cpl.h889 __u8 sched; member
/linux-4.1.27/drivers/net/wireless/iwlwifi/mvm/
Dscan.c579 bool sched = mvm->scan_status == IWL_MVM_SCAN_SCHED; in iwl_mvm_scan_offload_stop() local
603 sched ? "offloaded " : "", ret); in iwl_mvm_scan_offload_stop()
609 sched ? "offloaded " : ""); in iwl_mvm_scan_offload_stop()
625 if (sched) in iwl_mvm_scan_offload_stop()
1488 bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN); in iwl_mvm_rx_umac_scan_complete_notif() local
1499 uid, sched ? "sched" : "regular", in iwl_mvm_rx_umac_scan_complete_notif()
1510 if (!sched) { in iwl_mvm_rx_umac_scan_complete_notif()
/linux-4.1.27/tools/perf/tests/
DBuild13 perf-y += evsel-tp-sched.o
/linux-4.1.27/net/sched/
Dsch_hfsc.c122 struct hfsc_sched *sched; /* scheduler data */ member
196 struct rb_node **p = &cl->sched->eligible.rb_node; in eltree_insert()
209 rb_insert_color(&cl->el_node, &cl->sched->eligible); in eltree_insert()
215 rb_erase(&cl->el_node, &cl->sched->eligible); in eltree_remove()
861 list_add_tail(&cl->dlist, &cl->sched->droplist); in set_active()
1087 cl->sched = q; in hfsc_change_class()
1453 q->root.sched = q; in hfsc_init_qdisc()
DKconfig55 See the top of <file:net/sched/sch_cbq.c> for more details.
96 See the top of <file:net/sched/sch_atm.c> for more details.
125 See the top of <file:net/sched/sch_red.c> for more details.
136 See the top of <file:net/sched/sch_sfb.c> for more details.
147 See the top of <file:net/sched/sch_sfq.c> for more details.
159 See the top of <file:net/sched/sch_teql.c> for more details.
170 See the top of <file:net/sched/sch_tbf.c> for more details.
180 (see the top of <file:net/sched/sch_red.c> for details and
Dsch_cbq.c492 psched_time_t sched = q->now; in cbq_ovl_delay() local
503 sched += delay + cl->penalty; in cbq_ovl_delay()
504 cl->penalized = sched; in cbq_ovl_delay()
508 expires = ns_to_ktime(PSCHED_TICKS2NS(sched)); in cbq_ovl_delay()
557 psched_time_t sched = now; in cbq_undelay_prio() local
580 } else if (sched - cl->penalized > 0) in cbq_undelay_prio()
581 sched = cl->penalized; in cbq_undelay_prio()
584 return sched - now; in cbq_undelay_prio()
/linux-4.1.27/drivers/input/serio/
Dhil_mlc.c737 goto sched; in hilse_donode()
742 if (tv.tv_usec >= mlc->intimeout) goto sched; in hilse_donode()
744 if (!tv.tv_usec) goto sched; in hilse_donode()
747 sched: in hilse_donode()
/linux-4.1.27/tools/power/cpupower/po/
Dde.po690 " -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
696 " -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
707 msgid "--sched-mc param out of range [0-%d]\n"
712 msgid "--sched-smt param out of range [0-%d]\n"
717 msgid "Error setting sched-mc %s\n"
722 msgid "Error setting sched-smt %s\n"
744 msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
750 " -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
Dcs.po679 " -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
685 " -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
696 msgid "--sched-mc param out of range [0-%d]\n"
701 msgid "--sched-smt param out of range [0-%d]\n"
706 msgid "Error setting sched-mc %s\n"
711 msgid "Error setting sched-smt %s\n"
733 msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
739 " -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
Dit.po687 " -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
693 " -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
704 msgid "--sched-mc param out of range [0-%d]\n"
709 msgid "--sched-smt param out of range [0-%d]\n"
714 msgid "Error setting sched-mc %s\n"
719 msgid "Error setting sched-smt %s\n"
741 msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
748 " -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
Dfr.po679 " -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
685 " -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
696 msgid "--sched-mc param out of range [0-%d]\n"
701 msgid "--sched-smt param out of range [0-%d]\n"
706 msgid "Error setting sched-mc %s\n"
711 msgid "Error setting sched-smt %s\n"
733 msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
739 " -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
Dpt.po689 " -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
695 " -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
706 msgid "--sched-mc param out of range [0-%d]\n"
711 msgid "--sched-smt param out of range [0-%d]\n"
716 msgid "Error setting sched-mc %s\n"
721 msgid "Error setting sched-smt %s\n"
743 msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
750 " -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
/linux-4.1.27/tools/testing/selftests/rcutorture/doc/
DTREE_RCU-kconfig.txt31 RCU-sched: Do one with PREEMPT but not BOOST.
/linux-4.1.27/drivers/staging/lustre/lnet/selftest/
Dselftest.h469 swi_action_t action, struct cfs_wi_sched *sched) in swi_init_workitem() argument
471 swi->swi_sched = sched; in swi_init_workitem()
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
Dcm.c3914 static int sched(struct c4iw_dev *dev, struct sk_buff *skb) in sched() function
3961 sched(dev, skb); in fw6_msg()
4008 sched(dev, skb); in peer_abort_intr()
4017 [CPL_ACT_ESTABLISH] = sched,
4018 [CPL_ACT_OPEN_RPL] = sched,
4019 [CPL_RX_DATA] = sched,
4020 [CPL_ABORT_RPL_RSS] = sched,
4021 [CPL_ABORT_RPL] = sched,
4022 [CPL_PASS_OPEN_RPL] = sched,
4023 [CPL_CLOSE_LISTSRV_RPL] = sched,
[all …]
/linux-4.1.27/lib/
Ddebugobjects.c194 int sched = 0; in free_object() local
202 sched = keventd_up(); in free_object()
207 if (sched) in free_object()
/linux-4.1.27/tools/testing/selftests/rcutorture/bin/
Dkvm.sh394 elif test "$dryrun" = sched
/linux-4.1.27/kernel/
DMakefile25 obj-y += sched/
/linux-4.1.27/tools/virtio/virtio-trace/
DREADME98 # echo 1 > /sys/kernel/debug/tracing/events/sched/enable
/linux-4.1.27/Documentation/zh_CN/
Dmagic-number.txt148 HTB_CMAGIC 0xFEFAFEF1 htb_class net/sched/sch_htb.c
/linux-4.1.27/arch/powerpc/
DMakefile203 KBUILD_CFLAGS += -mno-sched-epilog
/linux-4.1.27/arch/arm/
DMakefile40 KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
/linux-4.1.27/drivers/staging/wlan-ng/
Dhfa384x_usb.c2789 int sched; in hfa384x_tx_timeout() local
2791 sched = !test_and_set_bit(WORK_TX_HALT, &hw->usb_flags); in hfa384x_tx_timeout()
2792 sched |= !test_and_set_bit(WORK_RX_HALT, &hw->usb_flags); in hfa384x_tx_timeout()
2793 if (sched) in hfa384x_tx_timeout()
/linux-4.1.27/arch/mips/
DMakefile125 cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
/linux-4.1.27/include/trace/events/
Dsched.h2 #define TRACE_SYSTEM sched
/linux-4.1.27/include/asm-generic/
Dvmlinux.lds.h428 *(.sched.text) \
/linux-4.1.27/Documentation/locking/
Dspinlocks.txt140 handling in kernel/sched/core.c - nothing ever _changes_ a wait-queue from
Drt-mutex-design.txt387 Note that rt_mutex_setprio is defined in kernel/sched/core.c to implement the
/linux-4.1.27/net/unix/
Daf_unix.c1156 int sched; in unix_wait_for_peer() local
1161 sched = !sock_flag(other, SOCK_DEAD) && in unix_wait_for_peer()
1167 if (sched) in unix_wait_for_peer()
/linux-4.1.27/Documentation/vm/
Dnuma101 structures [see Documentation/scheduler/sched-domains.txt]--and the scheduler
/linux-4.1.27/drivers/s390/cio/
Dqdio_main.c861 goto sched; in __qdio_outbound_processing()
878 sched: in __qdio_outbound_processing()
/linux-4.1.27/include/uapi/linux/
DKbuild354 header-y += sched.h
/linux-4.1.27/include/net/
Dip_vs.h1355 struct ip_vs_scheduler *sched);
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
Dresource_tracker.c651 u8 sched = *(u8 *)(inbox->buf + 64); in update_pkey_index() local
657 port = (sched >> 6 & 1) + 1; in update_pkey_index()
3554 u8 sched = *(u8 *)(inbox->buf + 64); in roce_verify_mac() local
3557 port = (sched >> 6 & 1) + 1; in roce_verify_mac()
/linux-4.1.27/init/
DKconfig735 "s" for RCU-sched. Nothing prevents this kthread from running
946 - Documentation/scheduler/sched-design-CFS.txt (CFS)
1099 See tip/Documentation/scheduler/sched-bwc.txt for more information.
1110 See Documentation/scheduler/sched-rt-group.txt for more information.
/linux-4.1.27/Documentation/timers/
DNO_HZ.txt344 load, maintaining sched average, computing CFS entity vruntime,
/linux-4.1.27/Documentation/ia64/
Derr_inject.txt92 #include <sched.h>
/linux-4.1.27/Documentation/filesystems/
Dspufs.txt297 ate(2) to address a specific SPU context. When the context gets sched-
Dproc.txt1852 own. Sensitive files like cmdline, sched*, status are now protected against
/linux-4.1.27/Documentation/s390/
DDebugging390.txt349 defined in linux/include/linux/sched.h
983 Now make CC:="s390-gcc -g" kernel/sched.s
985 ( task_struct is defined in include/linux/sched.h ).
/linux-4.1.27/
DMAINTAINERS6736 F: net/sched/sch_netem.c
8667 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
8669 F: kernel/sched/
8670 F: include/linux/sched.h
8671 F: include/uapi/linux/sched.h
9620 F: net/sched/
DCREDITS714 D: Assorted sched/mm titbits