Searched refs:sched (Results 1 – 144 of 144) sorted by relevance

/linux-4.4.14/drivers/staging/lustre/lustre/libcfs/
Dworkitem.c90 cfs_wi_sched_lock(struct cfs_wi_sched *sched) in cfs_wi_sched_lock() argument
92 spin_lock(&sched->ws_lock); in cfs_wi_sched_lock()
96 cfs_wi_sched_unlock(struct cfs_wi_sched *sched) in cfs_wi_sched_unlock() argument
98 spin_unlock(&sched->ws_lock); in cfs_wi_sched_unlock()
102 cfs_wi_sched_cansleep(struct cfs_wi_sched *sched) in cfs_wi_sched_cansleep() argument
104 cfs_wi_sched_lock(sched); in cfs_wi_sched_cansleep()
105 if (sched->ws_stopping) { in cfs_wi_sched_cansleep()
106 cfs_wi_sched_unlock(sched); in cfs_wi_sched_cansleep()
110 if (!list_empty(&sched->ws_runq)) { in cfs_wi_sched_cansleep()
111 cfs_wi_sched_unlock(sched); in cfs_wi_sched_cansleep()
[all …]
/linux-4.4.14/tools/perf/
Dbuiltin-sched.c106 int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
109 int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
112 int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
116 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
119 int (*migrate_task_event)(struct perf_sched *sched,
187 static void burn_nsecs(struct perf_sched *sched, u64 nsecs) in burn_nsecs() argument
193 } while (T1 + sched->run_measurement_overhead < T0 + nsecs); in burn_nsecs()
206 static void calibrate_run_measurement_overhead(struct perf_sched *sched) in calibrate_run_measurement_overhead() argument
213 burn_nsecs(sched, 0); in calibrate_run_measurement_overhead()
218 sched->run_measurement_overhead = min_delta; in calibrate_run_measurement_overhead()
[all …]
Dcommand-list.txt22 perf-sched mainporcelain common
DBuild6 perf-y += builtin-sched.o
Dperf-completion.sh168 if [[ $prev_skip_opts == @(kvm|kmem|mem|lock|sched|
Dbuiltin-trace.c1441 bool sched; member
2554 if (trace->sched && in trace__run()
3080 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"), in cmd_trace()
/linux-4.4.14/drivers/gpu/drm/amd/scheduler/
Dgpu_scheduler.c34 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
118 int amd_sched_entity_init(struct amd_gpu_scheduler *sched, in amd_sched_entity_init() argument
125 if (!(sched && entity && rq)) in amd_sched_entity_init()
131 entity->sched = sched; in amd_sched_entity_init()
155 static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched, in amd_sched_entity_is_initialized() argument
158 return entity->sched == sched && in amd_sched_entity_is_initialized()
204 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, in amd_sched_entity_fini() argument
209 if (!amd_sched_entity_is_initialized(sched, entity)) in amd_sched_entity_fini()
216 wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity)); in amd_sched_entity_fini()
228 amd_sched_wakeup(entity->sched); in amd_sched_entity_wakeup()
[all …]
Dgpu_scheduler.h47 struct amd_gpu_scheduler *sched; member
74 struct amd_gpu_scheduler *sched; member
82 struct amd_gpu_scheduler *sched; member
125 int amd_sched_init(struct amd_gpu_scheduler *sched,
128 void amd_sched_fini(struct amd_gpu_scheduler *sched);
130 int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
134 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
Dsched_fence.c41 fence->sched = s_entity->sched; in amd_sched_fence_create()
79 return (const char *)fence->sched->name; in amd_sched_fence_get_timeline_name()
Dgpu_sched_trace.h30 __entry->name = sched_job->sched->name;
34 &sched_job->sched->hw_rq_count);
/linux-4.4.14/net/netfilter/ipvs/
Dip_vs_sched.c66 struct ip_vs_scheduler *sched) in ip_vs_unbind_scheduler() argument
75 if (sched->done_service) in ip_vs_unbind_scheduler()
76 sched->done_service(svc); in ip_vs_unbind_scheduler()
86 struct ip_vs_scheduler *sched; in ip_vs_sched_getbyname() local
92 list_for_each_entry(sched, &ip_vs_schedulers, n_list) { in ip_vs_sched_getbyname()
96 if (sched->module && !try_module_get(sched->module)) { in ip_vs_sched_getbyname()
102 if (strcmp(sched_name, sched->name)==0) { in ip_vs_sched_getbyname()
105 return sched; in ip_vs_sched_getbyname()
107 module_put(sched->module); in ip_vs_sched_getbyname()
120 struct ip_vs_scheduler *sched; in ip_vs_scheduler_get() local
[all …]
Dip_vs_ctl.c790 struct ip_vs_scheduler *sched; in __ip_vs_update_dest() local
846 sched = rcu_dereference_protected(svc->scheduler, 1); in __ip_vs_update_dest()
847 if (sched && sched->add_dest) in __ip_vs_update_dest()
848 sched->add_dest(svc, dest); in __ip_vs_update_dest()
850 sched = rcu_dereference_protected(svc->scheduler, 1); in __ip_vs_update_dest()
851 if (sched && sched->upd_dest) in __ip_vs_update_dest()
852 sched->upd_dest(svc, dest); in __ip_vs_update_dest()
1080 struct ip_vs_scheduler *sched; in __ip_vs_unlink_dest() local
1082 sched = rcu_dereference_protected(svc->scheduler, 1); in __ip_vs_unlink_dest()
1083 if (sched && sched->del_dest) in __ip_vs_unlink_dest()
[all …]
Dip_vs_core.c324 struct ip_vs_scheduler *sched; in ip_vs_sched_persist() local
331 sched = rcu_dereference(svc->scheduler); in ip_vs_sched_persist()
332 if (sched) { in ip_vs_sched_persist()
335 dest = sched->schedule(svc, skb, iph); in ip_vs_sched_persist()
429 struct ip_vs_scheduler *sched; in ip_vs_schedule() local
504 sched = rcu_dereference(svc->scheduler); in ip_vs_schedule()
505 if (sched) { in ip_vs_schedule()
508 dest = sched->schedule(svc, skb, iph); in ip_vs_schedule()
/linux-4.4.14/crypto/
Dfcrypt.c54 __be32 sched[ROUNDS]; member
226 #define F_ENCRYPT(R, L, sched) \ argument
229 u.l = sched ^ R; \
245 F_ENCRYPT(X.r, X.l, ctx->sched[0x0]); in fcrypt_encrypt()
246 F_ENCRYPT(X.l, X.r, ctx->sched[0x1]); in fcrypt_encrypt()
247 F_ENCRYPT(X.r, X.l, ctx->sched[0x2]); in fcrypt_encrypt()
248 F_ENCRYPT(X.l, X.r, ctx->sched[0x3]); in fcrypt_encrypt()
249 F_ENCRYPT(X.r, X.l, ctx->sched[0x4]); in fcrypt_encrypt()
250 F_ENCRYPT(X.l, X.r, ctx->sched[0x5]); in fcrypt_encrypt()
251 F_ENCRYPT(X.r, X.l, ctx->sched[0x6]); in fcrypt_encrypt()
[all …]
/linux-4.4.14/drivers/net/wireless/ath/ath9k/
Dchannel.c243 if (likely(sc->sched.channel_switch_time)) in ath_chanctx_check_active()
245 usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
293 ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
363 mod_timer(&sc->sched.timer, jiffies + tsf_time); in ath_chanctx_setup_timer()
379 if (ctx->active && sc->sched.extend_absence) { in ath_chanctx_handle_bmiss()
381 sc->sched.extend_absence = false; in ath_chanctx_handle_bmiss()
388 if (ctx->active && sc->sched.beacon_miss >= 2) { in ath_chanctx_handle_bmiss()
390 sc->sched.extend_absence = true; in ath_chanctx_handle_bmiss()
403 avp->offchannel_duration = sc->sched.offchannel_duration; in ath_chanctx_offchannel_noa()
431 if (sc->sched.extend_absence) in ath_chanctx_set_periodic_noa()
[all …]
Dmain.c509 bool sched = false; in ath_isr() local
547 sched = true; in ath_isr()
588 if (sched) { in ath_isr()
2546 sc->sched.mgd_prepare_tx = true; in ath9k_mgd_prepare_tx()
2562 sc->sched.mgd_prepare_tx = false; in ath9k_mgd_prepare_tx()
2575 sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE; in ath9k_mgd_prepare_tx()
Dath9k.h980 struct ath_chanctx_sched sched; member
/linux-4.4.14/Documentation/scheduler/
D00-INDEX3 sched-arch.txt
5 sched-bwc.txt
7 sched-design-CFS.txt
9 sched-domains.txt
11 sched-nice-design.txt
13 sched-rt-group.txt
15 sched-deadline.txt
17 sched-stats.txt
Dsched-domains.txt12 explicitly set. A sched domain's span means "balance process load among these
23 Balancing within a sched domain occurs between groups. That is, each group
28 In kernel/sched/core.c, trigger_load_balance() is run periodically on each CPU
35 at the time the scheduler_tick() happened and iterates over all sched domains
42 Initially, load_balance() finds the busiest group in the current sched domain.
47 computed while iterating over this sched domain's groups.
49 *** Implementing sched domains ***
60 The implementor should read comments in include/linux/sched.h:
65 while using the generic domain builder in kernel/sched/core.c if they wish to
74 The sched-domains debugging infrastructure can be enabled by enabling
[all …]
Dsched-design-CFS.txt131 SCHED_FIFO/_RR are implemented in sched/rt.c and are as specified by
146 sched/fair.c implements the CFS scheduler described above.
148 sched/rt.c implements SCHED_FIFO and SCHED_RR semantics, in a simpler way than
Dsched-bwc.txt5 The SCHED_RT case is covered in Documentation/scheduler/sched-rt-group.txt ]
Dcompletion.txt28 kernel/sched/completion.c - for details on completion design and
Dsched-deadline.txt354 Documentation/scheduler/sched-rt-group.txt), and is based on readable/
/linux-4.4.14/tools/perf/Documentation/
Dperf-sched.txt1 perf-sched(1)
6 perf-sched - Tool to trace/measure scheduler properties (latencies)
11 'perf sched' {record|latency|map|replay|script}
15 There are five variants of perf sched:
17 'perf sched record <command>' to record the scheduling events
20 'perf sched latency' to report the per task scheduling latencies
23 'perf sched script' to see a detailed trace of the workload that
26 'perf sched replay' to simulate the workload that was recorded
27 via perf sched record. (this is done by starting up mockup threads
33 'perf sched map' to print a textual context-switching outline of
[all …]
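For orientation on the perf-sched.txt hits above, a minimal end-to-end run might look like the following (illustrative only; 'sleep 1' is a placeholder workload, any command can be traced):

  % perf sched record sleep 1     # record scheduling events while the workload runs
  % perf sched latency            # per-task scheduling latency summary
  % perf sched map                # textual per-CPU context-switching map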
Dperf-bench.txt31 % perf bench sched pipe # with no style specified
42 % perf bench --format=simple sched pipe # specified simple
49 'sched'::
64 SUITES FOR 'sched'
92 % perf bench sched messaging # run with default
98 % perf bench sched messaging -t -g 20 # be multi-thread, with 20 groups
119 % perf bench sched pipe
126 % perf bench sched pipe -l 1000 # loop 1000
Dperf-inject.txt39 --sched-stat::
Dperf-script-perl.txt52 # perf record -a -e sched:sched_wakeup
58 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
77 sub sched::sched_wakeup
Dperf-list.txt121 'subsys_glob:event_glob' to filter by tracepoint subsystems such as sched,
Dperf-trace.txt84 --sched:
Dperf-script-python.txt450 # perf record -a -e sched:sched_wakeup
456 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
/linux-4.4.14/tools/perf/scripts/python/bin/
Dsched-migration-record2 perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched
Dsched-migration-report3 perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/sched-migration.py
/linux-4.4.14/arch/x86/kernel/cpu/
Dperf_event.c680 static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints, in perf_sched_init() argument
685 memset(sched, 0, sizeof(*sched)); in perf_sched_init()
686 sched->max_events = num; in perf_sched_init()
687 sched->max_weight = wmax; in perf_sched_init()
688 sched->max_gp = gpmax; in perf_sched_init()
689 sched->constraints = constraints; in perf_sched_init()
696 sched->state.event = idx; /* start with min weight */ in perf_sched_init()
697 sched->state.weight = wmin; in perf_sched_init()
698 sched->state.unassigned = num; in perf_sched_init()
701 static void perf_sched_save_state(struct perf_sched *sched) in perf_sched_save_state() argument
[all …]
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/socklnd/
Dsocklnd_cb.c686 ksock_sched_t *sched = conn->ksnc_scheduler; in ksocknal_queue_tx_locked() local
726 spin_lock_bh(&sched->kss_lock); in ksocknal_queue_tx_locked()
759 list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs); in ksocknal_queue_tx_locked()
767 &sched->kss_tx_conns); in ksocknal_queue_tx_locked()
769 wake_up (&sched->kss_waitq); in ksocknal_queue_tx_locked()
772 spin_unlock_bh(&sched->kss_lock); in ksocknal_queue_tx_locked()
1316 ksock_sched_t *sched = conn->ksnc_scheduler; in ksocknal_recv() local
1347 spin_lock_bh(&sched->kss_lock); in ksocknal_recv()
1351 list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns); in ksocknal_recv()
1352 wake_up (&sched->kss_waitq); in ksocknal_recv()
[all …]
Dsocklnd.c664 ksock_sched_t *sched; in ksocknal_choose_scheduler_locked() local
669 sched = &info->ksi_scheds[0]; in ksocknal_choose_scheduler_locked()
676 if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns) in ksocknal_choose_scheduler_locked()
677 sched = &info->ksi_scheds[i]; in ksocknal_choose_scheduler_locked()
680 return sched; in ksocknal_choose_scheduler_locked()
1020 ksock_sched_t *sched; in ksocknal_create_conn() local
1253 sched = ksocknal_choose_scheduler_locked(cpt); in ksocknal_create_conn()
1254 sched->kss_nconns++; in ksocknal_create_conn()
1255 conn->ksnc_scheduler = sched; in ksocknal_create_conn()
1293 (int)(sched - &sched->kss_info->ksi_scheds[0])); in ksocknal_create_conn()
[all …]
Dsocklnd_lib.c689 ksock_sched_t *sched; in ksocknal_lib_memory_pressure() local
691 sched = conn->ksnc_scheduler; in ksocknal_lib_memory_pressure()
692 spin_lock_bh(&sched->kss_lock); in ksocknal_lib_memory_pressure()
707 spin_unlock_bh(&sched->kss_lock); in ksocknal_lib_memory_pressure()
Dsocklnd_proto.c373 ksock_sched_t *sched = conn->ksnc_scheduler; in ksocknal_handle_zcreq() local
377 spin_lock_bh(&sched->kss_lock); in ksocknal_handle_zcreq()
381 spin_unlock_bh(&sched->kss_lock); in ksocknal_handle_zcreq()
/linux-4.4.14/tools/testing/selftests/ftrace/test.d/event/
Dsubsystem-enable.tc19 if [ ! -f set_event -o ! -d events/sched ]; then
38 echo 1 > events/sched/enable
49 echo 0 > events/sched/enable
Devent-enable.tc19 if [ ! -f set_event -o ! -d events/sched ]; then
38 echo 1 > events/sched/sched_switch/enable
49 echo 0 > events/sched/sched_switch/enable
/linux-4.4.14/drivers/staging/lustre/include/linux/libcfs/
Dlibcfs_workitem.h100 void cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
101 int cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
102 void cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
/linux-4.4.14/arch/x86/crypto/
Daes-i586-asm_32.S107 #define do_fcol(table, a1,a2,a3,a4, idx, tmp, sched) \ argument
108 mov 0 sched,%a1; \
110 mov 12 sched,%a2; \
112 mov 4 sched,%a4; \
120 mov 8 sched,%a3; \
126 #define do_icol(table, a1,a2,a3,a4, idx, tmp, sched) \ argument
127 mov 0 sched,%a1; \
129 mov 4 sched,%a2; \
131 mov 12 sched,%a4; \
139 mov 8 sched,%a3; \
/linux-4.4.14/tools/perf/scripts/perl/bin/
Dwakeup-latency-record2 perf record -e sched:sched_switch -e sched:sched_wakeup $@
/linux-4.4.14/arch/powerpc/kernel/
DMakefile19 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
20 CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
21 CFLAGS_REMOVE_btext.o = -pg -mno-sched-epilog
22 CFLAGS_REMOVE_prom.o = -pg -mno-sched-epilog
24 CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
26 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
/linux-4.4.14/tools/perf/bench/
DBuild1 perf-y += sched-messaging.o
2 perf-y += sched-pipe.o
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
Damdgpu_ctx.c46 rq = &adev->rings[i]->sched.kernel_rq; in amdgpu_ctx_init()
48 rq = &adev->rings[i]->sched.sched_rq; in amdgpu_ctx_init()
49 r = amd_sched_entity_init(&adev->rings[i]->sched, in amdgpu_ctx_init()
58 amd_sched_entity_fini(&adev->rings[j]->sched, in amdgpu_ctx_init()
81 amd_sched_entity_fini(&adev->rings[i]->sched, in amdgpu_ctx_fini()
Damdgpu_sched.c85 job->base.sched = &ring->sched; in amdgpu_sched_ib_submit_kernel_helper()
Damdgpu_sync.c72 ring = container_of(s_fence->sched, struct amdgpu_ring, sched); in amdgpu_sync_same_dev()
Damdgpu_sa.c423 ring = container_of(s_fence->sched, struct amdgpu_ring, sched); in amdgpu_sa_bo_dump_fence()
Damdgpu_ring.c454 return container_of(s_fence->sched, struct amdgpu_ring, sched); in amdgpu_ring_from_fence()
Damdgpu_fence.c501 r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, in amdgpu_fence_driver_init_ring()
569 amd_sched_fini(&ring->sched); in amdgpu_fence_driver_fini()
Damdgpu_cs.c864 job->base.sched = &ring->sched; in amdgpu_cs_ioctl()
Damdgpu.h846 struct amd_gpu_scheduler sched; member
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/
Do2iblnd.c650 struct kib_sched_info *sched; in kiblnd_create_conn() local
665 sched = kiblnd_data.kib_scheds[cpt]; in kiblnd_create_conn()
667 LASSERT(sched->ibs_nthreads > 0); in kiblnd_create_conn()
777 conn->ibc_sched = sched; in kiblnd_create_conn()
805 spin_lock_irqsave(&sched->ibs_lock, flags); in kiblnd_create_conn()
807 spin_unlock_irqrestore(&sched->ibs_lock, flags); in kiblnd_create_conn()
2435 struct kib_sched_info *sched; in kiblnd_base_shutdown() local
2458 cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) in kiblnd_base_shutdown()
2459 wake_up_all(&sched->ibs_waitq); in kiblnd_base_shutdown()
2562 struct kib_sched_info *sched; in kiblnd_base_startup() local
[all …]
Do2iblnd_cb.c135 struct kib_sched_info *sched = conn->ibc_sched; in kiblnd_drop_rx() local
138 spin_lock_irqsave(&sched->ibs_lock, flags); in kiblnd_drop_rx()
141 spin_unlock_irqrestore(&sched->ibs_lock, flags); in kiblnd_drop_rx()
3239 struct kib_sched_info *sched = conn->ibc_sched; in kiblnd_cq_completion() local
3244 spin_lock_irqsave(&sched->ibs_lock, flags); in kiblnd_cq_completion()
3253 list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns); in kiblnd_cq_completion()
3255 if (waitqueue_active(&sched->ibs_waitq)) in kiblnd_cq_completion()
3256 wake_up(&sched->ibs_waitq); in kiblnd_cq_completion()
3259 spin_unlock_irqrestore(&sched->ibs_lock, flags); in kiblnd_cq_completion()
3275 struct kib_sched_info *sched; in kiblnd_scheduler() local
[all …]
/linux-4.4.14/tools/perf/scripts/perl/
Dwakeup-latency.pl28 sub sched::sched_switch subroutine
51 sub sched::sched_wakeup subroutine
/linux-4.4.14/Documentation/DocBook/
D.device-drivers.xml.cmd2 …/sched.h kernel/sched/core.c kernel/sched/cpupri.c kernel/sched/fair.c include/linux/completion.h …
D.networking.xml.cmd2 …sched.c net/sunrpc/socklib.c net/sunrpc/stats.c net/sunrpc/rpc_pipe.c net/sunrpc/rpcb_clnt.c net/s…
Ddevice-drivers.xml.db28 API-sched-setscheduler
29 API-sched-setscheduler-nocheck
238 API-synchronize-sched
242 API-get-state-synchronize-sched
243 API-cond-synchronize-sched
244 API-synchronize-sched-expedited
246 API-rcu-barrier-sched
250 API-rcu-read-lock-sched-held
/linux-4.4.14/drivers/usb/host/
Dehci-sched.c1274 struct ehci_iso_sched *sched; in itd_urb_transaction() local
1277 sched = iso_sched_alloc (urb->number_of_packets, mem_flags); in itd_urb_transaction()
1278 if (unlikely (sched == NULL)) in itd_urb_transaction()
1281 itd_sched_init(ehci, sched, stream, urb); in itd_urb_transaction()
1284 num_itds = 1 + (sched->span + 7) / 8; in itd_urb_transaction()
1310 iso_sched_free(stream, sched); in itd_urb_transaction()
1319 list_add (&itd->itd_list, &sched->td_list); in itd_urb_transaction()
1324 urb->hcpriv = sched; in itd_urb_transaction()
1412 struct ehci_iso_sched *sched, in sitd_slot_ok() argument
1505 struct ehci_iso_sched *sched = urb->hcpriv; in iso_stream_schedule() local
[all …]
DMakefile9 fhci-y += fhci-mem.o fhci-tds.o fhci-sched.o
Dfotg210-hcd.c4111 struct fotg210_iso_sched *sched; in itd_urb_transaction() local
4114 sched = iso_sched_alloc(urb->number_of_packets, mem_flags); in itd_urb_transaction()
4115 if (unlikely(sched == NULL)) in itd_urb_transaction()
4118 itd_sched_init(fotg210, sched, stream, urb); in itd_urb_transaction()
4121 num_itds = 1 + (sched->span + 7) / 8; in itd_urb_transaction()
4147 iso_sched_free(stream, sched); in itd_urb_transaction()
4155 list_add(&itd->itd_list, &sched->td_list); in itd_urb_transaction()
4160 urb->hcpriv = sched; in itd_urb_transaction()
4199 struct fotg210_iso_sched *sched = urb->hcpriv; in iso_stream_schedule() local
4202 span = sched->span; in iso_stream_schedule()
[all …]
/linux-4.4.14/Documentation/cgroups/
Dcpusets.txt145 - in sched.c migrate_live_tasks(), to keep migrating tasks within
376 The kernel scheduler (kernel/sched/core.c) automatically load balances
385 has support to partition the systems CPUs into a number of sched
386 domains such that it only load balances within each sched domain.
387 Each sched domain covers some subset of the CPUs in the system;
388 no two sched domains overlap; some CPUs might not be in any sched
391 Put simply, it costs less to balance between two smaller sched domains
395 By default, there is one sched domain covering all CPUs, including those
411 be contained in a single sched domain, ensuring that load balancing
421 enabled, then the scheduler will have one sched domain covering all
[all …]
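The cpusets.txt hits above describe partitioning the system's CPUs into separate sched domains; a small sketch of doing this through the legacy cgroup-v1 cpuset interface that this document covers (mount point, cpuset name, and CPU/memory-node numbers are illustrative):

  # mount -t cgroup -o cpuset cpuset /sys/fs/cgroup/cpuset
  # echo 0 > /sys/fs/cgroup/cpuset/cpuset.sched_load_balance    # drop the single all-CPU sched domain
  # mkdir /sys/fs/cgroup/cpuset/partition
  # echo 2-3 > /sys/fs/cgroup/cpuset/partition/cpuset.cpus      # CPUs 2-3 now get their own sched domain
  # echo 0 > /sys/fs/cgroup/cpuset/partition/cpuset.mems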
/linux-4.4.14/Documentation/trace/
Devents.txt48 The events are organized into subsystems, such as ext4, irq, sched,
65 # echo 1 > /sys/kernel/debug/tracing/events/sched/sched_wakeup/enable
69 # echo 0 > /sys/kernel/debug/tracing/events/sched/sched_wakeup/enable
71 To enable all events in sched subsystem:
73 # echo 1 > /sys/kernel/debug/tracing/events/sched/enable
128 # cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/format
211 # cd /sys/kernel/debug/tracing/events/sched/sched_wakeup
261 Clear the filters on all events in the sched subsystem:
263 # cd /sys/kernel/debug/tracing/events/sched
270 Set a filter using only common fields for all events in the sched
[all …]
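The events.txt hits above mention setting and clearing filters on the sched subsystem; an illustrative example using only a common event field (the filter expression is mine, not taken from the excerpt):

  # cd /sys/kernel/debug/tracing/events/sched/sched_switch
  # echo 'common_pid != 0' > filter    # ignore events generated in the idle task (pid 0)
  # echo 1 > enable
  # echo 0 > filter                    # writing 0 clears the filter again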
Dftrace.txt2509 echo 'try_to_wake_up:enable_event:sched:sched_switch:2' > \
2520 echo '!try_to_wake_up:enable_event:sched:sched_switch:0' > \
2522 echo '!schedule:disable_event:sched:sched_switch' > \
2670 # echo 1 > events/sched/enable
2754 # echo 1 > instances/foo/events/sched/sched_wakeup/enable
2755 # echo 1 > instances/foo/events/sched/sched_wakeup_new/enable
2756 # echo 1 > instances/foo/events/sched/sched_switch/enable
/linux-4.4.14/tools/testing/selftests/rcutorture/configs/rcu/
DTREE05.boot1 rcutorture.torture_type=sched
DTREE08.boot1 rcutorture.torture_type=sched
/linux-4.4.14/net/ipv6/
Dip6_flowlabel.c134 unsigned long sched = 0; in ip6_fl_gc() local
156 if (!sched || time_before(ttd, sched)) in ip6_fl_gc()
157 sched = ttd; in ip6_fl_gc()
162 if (!sched && atomic_read(&fl_size)) in ip6_fl_gc()
163 sched = now + FL_MAX_LINGER; in ip6_fl_gc()
164 if (sched) { in ip6_fl_gc()
165 mod_timer(&ip6_fl_gc_timer, sched); in ip6_fl_gc()
/linux-4.4.14/tools/testing/selftests/ftrace/test.d/00basic/
Dbasic4.tc5 grep -q sched available_events && exit 0 || exit $FAIL
/linux-4.4.14/drivers/usb/c67x00/
DMakefile7 c67x00-y := c67x00-drv.o c67x00-ll-hpi.o c67x00-hcd.o c67x00-sched.o
/linux-4.4.14/arch/arm/plat-versatile/
DMakefile4 obj-$(CONFIG_PLAT_VERSATILE_SCHED_CLOCK) += sched-clock.o
/linux-4.4.14/arch/tile/kernel/
Dregs_32.S54 STD_ENTRY_SECTION(__switch_to, .sched.text)
99 STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
Dregs_64.S54 STD_ENTRY_SECTION(__switch_to, .sched.text)
99 STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
/linux-4.4.14/arch/powerpc/platforms/powermac/
DMakefile5 CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog
/linux-4.4.14/arch/arc/kernel/
Dctx_sw_asm.S21 .section .sched.text,"ax",@progbits
/linux-4.4.14/Documentation/RCU/
Dlockdep.txt15 rcu_read_lock_sched_held() for RCU-sched.
31 Check for RCU-sched read-side critical section.
45 is invoked by both RCU-sched readers and updaters.
Dstallwarn.txt65 and that the stall was affecting RCU-sched. This message will normally be
67 RCU and RCU-sched are implemented by the same underlying mechanism,
192 result in RCU-sched and RCU-bh stalls.
195 result in RCU-sched stalls and, if ksoftirqd is in use, RCU-bh
199 result in RCU-sched and RCU-bh stalls.
251 The RCU, RCU-sched, RCU-bh, and RCU-tasks implementations have CPU stall
DNMI-RCU.txt120 being protected by RCU-sched.
Dtorture.txt184 "sched": preempt_disable(), preempt_enable(), and
DwhatisRCU.txt856 sched: Critical sections Grace period Barrier
908 If so, RCU-sched is the only choice that will work for you.
Dtrace.txt22 For CONFIG_TREE_RCU, the RCU flavor maps onto the RCU-sched flavor,
/linux-4.4.14/net/sunrpc/
DMakefile10 sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
Diwch_cm.c2203 static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) in sched() function
2239 [CPL_ACT_ESTABLISH] = sched,
2240 [CPL_ACT_OPEN_RPL] = sched,
2241 [CPL_RX_DATA] = sched,
2242 [CPL_TX_DMA_ACK] = sched,
2243 [CPL_ABORT_RPL_RSS] = sched,
2244 [CPL_ABORT_RPL] = sched,
2245 [CPL_PASS_OPEN_RPL] = sched,
2246 [CPL_CLOSE_LISTSRV_RPL] = sched,
2247 [CPL_PASS_ACCEPT_REQ] = sched,
[all …]
/linux-4.4.14/drivers/input/joystick/
Dsidewinder.c138 int timeout, bitout, sched, i, kick, start, strobe; in sw_read_packet() local
148 sched = 0; in sw_read_packet()
168 sched--; in sw_read_packet()
181 sched = kick; /* Schedule second trigger */ in sw_read_packet()
186 if (pending && sched < 0 && (i > -SW_END)) { /* Second trigger time */ in sw_read_packet()
/linux-4.4.14/kernel/time/
DMakefile11 obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o tick-sched.o
/linux-4.4.14/arch/unicore32/
DMakefile30 KBUILD_CFLAGS += -mno-sched-prolog
/linux-4.4.14/arch/xtensa/kernel/
Dvmlinux.lds.S94 *(.sched.literal .sched.text)
/linux-4.4.14/Documentation/ABI/testing/
Dsysfs-kernel-uids14 Documentation/scheduler/sched-design-CFS.txt
/linux-4.4.14/Documentation/block/
D00-INDEX25 switching-sched.txt
Ddeadline-iosched.txt10 Refer to Documentation/block/switching-sched.txt for information on
/linux-4.4.14/arch/powerpc/platforms/cell/spufs/
DMakefile4 spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb/
Dsge.c235 struct sched { struct
271 struct sched *tx_sched;
284 struct sched *s = sge->tx_sched; in tx_sched_stop()
300 struct sched *s = sge->tx_sched; in t1_sched_update_parms()
345 struct sched *s = sge->tx_sched;
360 struct sched *s = sge->tx_sched;
373 struct sched *s; in tx_sched_init()
376 s = kzalloc(sizeof (struct sched), GFP_KERNEL); in tx_sched_init()
399 struct sched *s = sge->tx_sched; in sched_update_avail()
434 struct sched *s = sge->tx_sched; in sched_skb()
/linux-4.4.14/net/rxrpc/
Dar-call.c757 bool sched; in rxrpc_mark_call_released() local
761 sched = false; in rxrpc_mark_call_released()
767 sched = true; in rxrpc_mark_call_released()
770 sched = true; in rxrpc_mark_call_released()
771 if (sched) in rxrpc_mark_call_released()
/linux-4.4.14/net/
DMakefile15 obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/
DKconfig225 source "net/sched/Kconfig"
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb3/
Dcxgb3_ioctl.h101 uint8_t sched; member
Dcxgb3_main.c801 char *buf, int sched) in tm_attr_show() argument
808 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; in tm_attr_show()
812 if (sched & 1) in tm_attr_show()
827 const char *buf, size_t len, int sched) in tm_attr_store() argument
843 ret = t3_config_sched(adap, val, sched); in tm_attr_store()
850 #define TM_ATTR(name, sched) \ argument
854 return tm_attr_show(d, buf, sched); \
859 return tm_attr_store(d, buf, len, sched); \
939 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, in send_pktsched_cmd() argument
955 req->sched = sched; in send_pktsched_cmd()
Dcommon.h732 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
Dt3_hw.c3002 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched) in t3_config_sched() argument
3028 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2); in t3_config_sched()
3030 if (sched & 1) in t3_config_sched()
Dt3_cpl.h889 __u8 sched; member
/linux-4.4.14/Documentation/
Dstable_kernel_rules.txt81 Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
82 Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
83 Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
Dmagic-number.txt146 HTB_CMAGIC 0xFEFAFEF1 htb_class net/sched/sch_htb.c
Dstatic-keys.txt251 'pipe-test' (also known as 'perf bench sched pipe') can be used to show the
Dunshare.txt122 #include <sched.h>
Dkernel-parameters.txt3065 for RCU-preempt, and "s" for RCU-sched, and "N"
3324 Run the RCU sched early boot self tests
3802 frequency tracepoints such as irq or sched, can cause
/linux-4.4.14/net/sched/
Dsch_hfsc.c122 struct hfsc_sched *sched; /* scheduler data */ member
196 struct rb_node **p = &cl->sched->eligible.rb_node; in eltree_insert()
209 rb_insert_color(&cl->el_node, &cl->sched->eligible); in eltree_insert()
215 rb_erase(&cl->el_node, &cl->sched->eligible); in eltree_remove()
861 list_add_tail(&cl->dlist, &cl->sched->droplist); in set_active()
1088 cl->sched = q; in hfsc_change_class()
1450 q->root.sched = q; in hfsc_init_qdisc()
DKconfig55 See the top of <file:net/sched/sch_cbq.c> for more details.
96 See the top of <file:net/sched/sch_atm.c> for more details.
125 See the top of <file:net/sched/sch_red.c> for more details.
136 See the top of <file:net/sched/sch_sfb.c> for more details.
147 See the top of <file:net/sched/sch_sfq.c> for more details.
159 See the top of <file:net/sched/sch_teql.c> for more details.
170 See the top of <file:net/sched/sch_tbf.c> for more details.
180 (see the top of <file:net/sched/sch_red.c> for details and
Dsch_cbq.c492 psched_time_t sched = q->now; in cbq_ovl_delay() local
503 sched += delay + cl->penalty; in cbq_ovl_delay()
504 cl->penalized = sched; in cbq_ovl_delay()
508 expires = ns_to_ktime(PSCHED_TICKS2NS(sched)); in cbq_ovl_delay()
557 psched_time_t sched = now; in cbq_undelay_prio() local
580 } else if (sched - cl->penalized > 0) in cbq_undelay_prio()
581 sched = cl->penalized; in cbq_undelay_prio()
584 return sched - now; in cbq_undelay_prio()
/linux-4.4.14/drivers/input/serio/
Dhil_mlc.c737 goto sched; in hilse_donode()
742 if (tv.tv_usec >= mlc->intimeout) goto sched; in hilse_donode()
744 if (!tv.tv_usec) goto sched; in hilse_donode()
747 sched: in hilse_donode()
/linux-4.4.14/tools/power/cpupower/po/
Dde.po690 " -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
696 " -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
707 msgid "--sched-mc param out of range [0-%d]\n"
712 msgid "--sched-smt param out of range [0-%d]\n"
717 msgid "Error setting sched-mc %s\n"
722 msgid "Error setting sched-smt %s\n"
744 msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
750 " -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
Dit.po687 " -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
693 " -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
704 msgid "--sched-mc param out of range [0-%d]\n"
709 msgid "--sched-smt param out of range [0-%d]\n"
714 msgid "Error setting sched-mc %s\n"
719 msgid "Error setting sched-smt %s\n"
741 msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
748 " -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
Dcs.po679 " -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
685 " -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
696 msgid "--sched-mc param out of range [0-%d]\n"
701 msgid "--sched-smt param out of range [0-%d]\n"
706 msgid "Error setting sched-mc %s\n"
711 msgid "Error setting sched-smt %s\n"
733 msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
739 " -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
Dfr.po679 " -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
685 " -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
696 msgid "--sched-mc param out of range [0-%d]\n"
701 msgid "--sched-smt param out of range [0-%d]\n"
706 msgid "Error setting sched-mc %s\n"
711 msgid "Error setting sched-smt %s\n"
733 msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
739 " -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
Dpt.po689 " -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n"
695 " -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler "
706 msgid "--sched-mc param out of range [0-%d]\n"
711 msgid "--sched-smt param out of range [0-%d]\n"
716 msgid "Error setting sched-mc %s\n"
721 msgid "Error setting sched-smt %s\n"
743 msgid " -m, --sched-mc Gets the kernel's multi core scheduler policy.\n"
750 " -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n"
/linux-4.4.14/tools/testing/selftests/rcutorture/doc/
DTREE_RCU-kconfig.txt30 RCU-sched: Do one with PREEMPT but not BOOST.
/linux-4.4.14/drivers/staging/lustre/lnet/selftest/
Dselftest.h473 swi_action_t action, struct cfs_wi_sched *sched) in swi_init_workitem() argument
475 swi->swi_sched = sched; in swi_init_workitem()
/linux-4.4.14/tools/perf/tests/
DBuild12 perf-y += evsel-tp-sched.o
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
Dcm.c4055 static int sched(struct c4iw_dev *dev, struct sk_buff *skb) in sched() function
4102 sched(dev, skb); in fw6_msg()
4149 sched(dev, skb); in peer_abort_intr()
4158 [CPL_ACT_ESTABLISH] = sched,
4159 [CPL_ACT_OPEN_RPL] = sched,
4160 [CPL_RX_DATA] = sched,
4161 [CPL_ABORT_RPL_RSS] = sched,
4162 [CPL_ABORT_RPL] = sched,
4163 [CPL_PASS_OPEN_RPL] = sched,
4164 [CPL_CLOSE_LISTSRV_RPL] = sched,
[all …]
/linux-4.4.14/lib/
Ddebugobjects.c194 int sched = 0; in free_object() local
202 sched = keventd_up(); in free_object()
207 if (sched) in free_object()
/linux-4.4.14/tools/testing/selftests/rcutorture/bin/
Dkvm.sh407 elif test "$dryrun" = sched
/linux-4.4.14/kernel/
DMakefile25 obj-y += sched/
/linux-4.4.14/tools/virtio/virtio-trace/
DREADME98 # echo 1 > /sys/kernel/debug/tracing/events/sched/enable
/linux-4.4.14/Documentation/zh_CN/
Dmagic-number.txt146 HTB_CMAGIC 0xFEFAFEF1 htb_class net/sched/sch_htb.c
/linux-4.4.14/arch/powerpc/
DMakefile205 KBUILD_CFLAGS += -mno-sched-epilog
/linux-4.4.14/drivers/staging/wlan-ng/
Dhfa384x_usb.c2789 int sched; in hfa384x_tx_timeout() local
2791 sched = !test_and_set_bit(WORK_TX_HALT, &hw->usb_flags); in hfa384x_tx_timeout()
2792 sched |= !test_and_set_bit(WORK_RX_HALT, &hw->usb_flags); in hfa384x_tx_timeout()
2793 if (sched) in hfa384x_tx_timeout()
/linux-4.4.14/arch/mips/
DMakefile125 cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
/linux-4.4.14/arch/arm/
DMakefile44 KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
/linux-4.4.14/include/trace/events/
Dsched.h2 #define TRACE_SYSTEM sched
/linux-4.4.14/include/asm-generic/
Dvmlinux.lds.h436 *(.sched.text) \
/linux-4.4.14/Documentation/locking/
Dspinlocks.txt140 handling in kernel/sched/core.c - nothing ever _changes_ a wait-queue from
Drt-mutex-design.txt387 Note that rt_mutex_setprio is defined in kernel/sched/core.c to implement the
/linux-4.4.14/net/unix/
Daf_unix.c1188 int sched; in unix_wait_for_peer() local
1193 sched = !sock_flag(other, SOCK_DEAD) && in unix_wait_for_peer()
1199 if (sched) in unix_wait_for_peer()
/linux-4.4.14/Documentation/vm/
Dnuma101 structures [see Documentation/scheduler/sched-domains.txt]--and the scheduler
/linux-4.4.14/drivers/s390/cio/
Dqdio_main.c861 goto sched; in __qdio_outbound_processing()
878 sched: in __qdio_outbound_processing()
/linux-4.4.14/include/uapi/linux/
DKbuild361 header-y += sched.h
/linux-4.4.14/include/net/
Dip_vs.h1358 struct ip_vs_scheduler *sched);
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
Dresource_tracker.c675 u8 sched = *(u8 *)(inbox->buf + 64); in update_pkey_index() local
681 port = (sched >> 6 & 1) + 1; in update_pkey_index()
3727 u8 sched = *(u8 *)(inbox->buf + 64); in roce_verify_mac() local
3730 port = (sched >> 6 & 1) + 1; in roce_verify_mac()
/linux-4.4.14/init/
DKconfig714 "s" for RCU-sched. Nothing prevents this kthread from running
935 - Documentation/scheduler/sched-design-CFS.txt (CFS)
1104 See tip/Documentation/scheduler/sched-bwc.txt for more information.
1115 See Documentation/scheduler/sched-rt-group.txt for more information.
/linux-4.4.14/Documentation/timers/
DNO_HZ.txt344 load, maintaining sched average, computing CFS entity vruntime,
/linux-4.4.14/Documentation/filesystems/
Dspufs.txt297 ate(2) to address a specific SPU context. When the context gets sched-
Dproc.txt1869 own. Sensitive files like cmdline, sched*, status are now protected against
/linux-4.4.14/Documentation/ia64/
Derr_inject.txt92 #include <sched.h>
/linux-4.4.14/Documentation/s390/
DDebugging390.txt349 defined in linux/include/linux/sched.h
983 Now make CC:="s390-gcc -g" kernel/sched.s
985 ( task_struct is defined in include/linux/sched.h ).
/linux-4.4.14/
DMAINTAINERS7322 F: net/sched/sch_netem.c
9394 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
9396 F: kernel/sched/
9397 F: include/linux/sched.h
9398 F: include/uapi/linux/sched.h
10393 F: net/sched/
DCREDITS718 D: Assorted sched/mm titbits