Searched refs:load (Results 1 - 200 of 2209) sorted by relevance


/linux-4.1.27/arch/alpha/include/asm/
H A Dxor.h62 xor $0,$1,$0 # 7 cycles from $1 load \n\
119 xor $0,$1,$1 # 8 cycles from $0 load \n\
120 xor $3,$4,$4 # 6 cycles from $4 load \n\
121 xor $6,$7,$7 # 6 cycles from $7 load \n\
122 xor $21,$22,$22 # 5 cycles from $22 load \n\
124 xor $1,$2,$2 # 9 cycles from $2 load \n\
125 xor $24,$25,$25 # 5 cycles from $25 load \n\
127 xor $4,$5,$5 # 6 cycles from $5 load \n\
130 xor $7,$20,$20 # 7 cycles from $20 load \n\
132 xor $22,$23,$23 # 7 cycles from $23 load \n\
135 xor $25,$27,$27 # 7 cycles from $27 load \n\
150 xor $0,$1,$1 # 4 cycles from $1 load \n\
151 xor $3,$4,$4 # 5 cycles from $4 load \n\
152 xor $6,$7,$7 # 5 cycles from $7 load \n\
154 xor $1,$2,$2 # 4 cycles from $2 load \n\
155 xor $4,$5,$5 # 5 cycles from $5 load \n\
157 xor $7,$20,$20 # 4 cycles from $20 load \n\
193 xor $0,$1,$1 # 6 cycles from $1 load \n\
195 xor $2,$3,$3 # 6 cycles from $3 load \n\
200 xor $4,$5,$5 # 7 cycles from $5 load \n\
204 xor $21,$22,$22 # 7 cycles from $22 load \n\
208 xor $23,$24,$24 # 7 cycles from $24 load \n\
215 xor $25,$27,$27 # 8 cycles from $27 load \n\
223 xor $0,$1,$1 # 9 cycles from $1 load \n\
224 xor $2,$3,$3 # 5 cycles from $3 load \n\
228 xor $4,$5,$5 # 5 cycles from $5 load \n\
239 xor $6,$7,$7 # 8 cycles from $6 load \n\
243 xor $21,$22,$22 # 8 cycles from $22 load \n\
245 xor $23,$24,$24 # 5 cycles from $24 load \n\
248 xor $25,$27,$27 # 5 cycles from $27 load \n\
250 xor $0,$1,$1 # 5 cycles from $1 load \n\
253 xor $2,$3,$3 # 4 cycles from $3 load \n\
290 xor $0,$1,$1 # 6 cycles from $1 load \n\
292 xor $2,$3,$3 # 6 cycles from $3 load \n\
297 xor $3,$4,$4 # 7 cycles from $4 load \n\
300 xor $5,$6,$6 # 7 cycles from $6 load \n\
301 xor $7,$22,$22 # 7 cycles from $22 load \n\
302 xor $6,$23,$23 # 7 cycles from $23 load \n\
307 xor $24,$25,$25 # 8 cycles from $25 load \n\
310 xor $25,$27,$27 # 8 cycles from $27 load \n\
312 xor $28,$0,$0 # 7 cycles from $0 load \n\
320 xor $1,$2,$2 # 6 cycles from $2 load \n\
322 xor $3,$4,$4 # 4 cycles from $4 load \n\
332 xor $4,$5,$5 # 7 cycles from $5 load \n\
335 xor $6,$7,$7 # 7 cycles from $7 load \n\
340 xor $7,$22,$22 # 7 cycles from $22 load \n\
342 xor $23,$24,$24 # 6 cycles from $24 load \n\
347 xor $25,$27,$27 # 7 cycles from $27 load \n\
350 xor $27,$28,$28 # 8 cycles from $28 load \n\
352 xor $0,$1,$1 # 6 cycles from $1 load \n\
360 xor $2,$3,$3 # 9 cycles from $3 load \n\
361 xor $3,$4,$4 # 9 cycles from $4 load \n\
362 xor $5,$6,$6 # 8 cycles from $6 load \n\
366 xor $7,$22,$22 # 7 cycles from $22 load \n\
367 xor $23,$24,$24 # 6 cycles from $24 load \n\
372 xor $24,$25,$25 # 8 cycles from $25 load \n\
425 xor $0,$1,$0 # 8 cycles from $1 load \n\
497 xor $0,$1,$1 # 8 cycles from $0 load \n\
498 xor $3,$4,$4 # 7 cycles from $4 load \n\
499 xor $6,$7,$7 # 6 cycles from $7 load \n\
500 xor $21,$22,$22 # 5 cycles from $22 load \n\
502 xor $1,$2,$2 # 9 cycles from $2 load \n\
503 xor $24,$25,$25 # 5 cycles from $25 load \n\
505 xor $4,$5,$5 # 6 cycles from $5 load \n\
508 xor $7,$20,$20 # 7 cycles from $20 load \n\
510 xor $22,$23,$23 # 7 cycles from $23 load \n\
513 xor $25,$27,$27 # 7 cycles from $27 load \n\
532 xor $0,$1,$1 # 6 cycles from $1 load \n\
533 xor $3,$4,$4 # 5 cycles from $4 load \n\
534 xor $6,$7,$7 # 5 cycles from $7 load \n\
535 xor $1,$2,$2 # 4 cycles from $2 load \n\
537 xor $4,$5,$5 # 5 cycles from $5 load \n\
538 xor $7,$20,$20 # 4 cycles from $20 load \n\
595 xor $0,$1,$1 # 6 cycles from $1 load \n\
597 xor $2,$3,$3 # 6 cycles from $3 load \n\
602 xor $4,$5,$5 # 7 cycles from $5 load \n\
606 xor $21,$22,$22 # 7 cycles from $22 load \n\
610 xor $23,$24,$24 # 7 cycles from $24 load \n\
617 xor $25,$27,$27 # 8 cycles from $27 load \n\
625 xor $0,$1,$1 # 9 cycles from $1 load \n\
626 xor $2,$3,$3 # 5 cycles from $3 load \n\
630 xor $4,$5,$5 # 5 cycles from $5 load \n\
642 xor $6,$7,$7 # 8 cycles from $6 load \n\
645 xor $21,$22,$22 # 8 cycles from $22 load \n\
650 xor $23,$24,$24 # 6 cycles from $24 load \n\
652 xor $25,$27,$27 # 6 cycles from $27 load \n\
656 xor $0,$1,$1 # 7 cycles from $1 load \n\
657 xor $2,$3,$3 # 6 cycles from $3 load \n\
721 xor $0,$1,$1 # 6 cycles from $1 load \n\
723 xor $2,$3,$3 # 6 cycles from $3 load \n\
728 xor $3,$4,$4 # 7 cycles from $4 load \n\
731 xor $5,$6,$6 # 7 cycles from $6 load \n\
732 xor $7,$22,$22 # 7 cycles from $22 load \n\
733 xor $6,$23,$23 # 7 cycles from $23 load \n\
738 xor $24,$25,$25 # 8 cycles from $25 load \n\
741 xor $25,$27,$27 # 8 cycles from $27 load \n\
743 xor $28,$0,$0 # 7 cycles from $0 load \n\
751 xor $1,$2,$2 # 6 cycles from $2 load \n\
753 xor $3,$4,$4 # 4 cycles from $4 load \n\
763 xor $4,$5,$5 # 7 cycles from $5 load \n\
766 xor $6,$7,$7 # 7 cycles from $7 load \n\
771 xor $7,$22,$22 # 7 cycles from $22 load \n\
773 xor $23,$24,$24 # 6 cycles from $24 load \n\
778 xor $25,$27,$27 # 7 cycles from $27 load \n\
781 xor $27,$28,$28 # 8 cycles from $28 load \n\
783 xor $0,$1,$1 # 6 cycles from $1 load \n\
793 xor $2,$3,$3 # 9 cycles from $3 load \n\
796 xor $3,$4,$4 # 9 cycles from $4 load \n\
798 xor $5,$6,$6 # 8 cycles from $6 load \n\
802 xor $7,$22,$22 # 7 cycles from $22 load \n\
803 xor $23,$24,$24 # 6 cycles from $24 load \n\
808 xor $24,$25,$25 # 8 cycles from $25 load \n\
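The cycle annotations above record how many cycles each xor issues after the load that produced its operand; these Alpha routines exist purely to hide that load latency by hand-scheduling. For orientation, here is a plain C sketch of what the 2-source variant computes, modeled on the kernel's generic xor helpers (names are illustrative; scheduling is left to the compiler):

/* Minimal sketch of the operation the scheduled assembly implements:
 * XOR one block of memory into another, eight words per iteration.
 * The Alpha assembly above does the same work, but hand-interleaves
 * loads and xors to cover each load's latency.
 */
static void xor_blocks_2(unsigned long bytes, unsigned long *p1,
			 const unsigned long *p2)
{
	long lines = bytes / (sizeof(unsigned long) * 8);

	do {
		p1[0] ^= p2[0];
		p1[1] ^= p2[1];
		p1[2] ^= p2[2];
		p1[3] ^= p2[3];
		p1[4] ^= p2[4];
		p1[5] ^= p2[5];
		p1[6] ^= p2[6];
		p1[7] ^= p2[7];
		p1 += 8;
		p2 += 8;
	} while (--lines > 0);
}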
/linux-4.1.27/arch/sh/boot/romimage/
H A DMakefile8 load-y := 0
10 mmcif-load-$(CONFIG_CPU_SUBTYPE_SH7724) := 0xe5200000 # ILRAM
12 load-$(CONFIG_ROMIMAGE_MMCIF) := $(mmcif-load-y)
15 LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(load-y) -e romstart \
H A Dhead.S16 /* load the romImage to above the empty zero page */
H A Dmmcif-sh7724.c63 /* load kernel via MMCIF interface */ mmcif_loader()
/linux-4.1.27/arch/x86/um/
H A Dstub_32.S8 /* load pointer to first operation */
12 /* load length of additional data */
27 /* load syscall-# */
30 /* load syscall params */
H A Dstub_64.S23 /* load pointer to first operation */
27 /* load length of additional data */
42 /* load syscall-# */
45 /* load syscall params */
/linux-4.1.27/net/core/
H A Dptp_classifier.c20 * ldh [12] ; load ethertype
25 * ldb [23] ; load proto
27 * ldh [20] ; load frag offset field
29 * ldxb 4*([14]&0xf) ; load IP header len
30 * ldh [x + 16] ; load UDP dst port
32 * ldh [x + 22] ; load payload
41 * ldb [20] ; load proto
43 * ldh [56] ; load UDP dst port
45 * ldh [62] ; load payload
54 * ldh [16] ; load inner type
56 * ldb [18] ; load payload
67 * ldb [27] ; load proto
69 * ldh [24] ; load frag offset field
71 * ldxb 4*([18]&0xf) ; load IP header len
72 * ldh [x + 20] ; load UDP dst port
74 * ldh [x + 26] ; load payload
83 * ldb [24] ; load proto
85 * ldh [60] ; load UDP dst port
87 * ldh [66] ; load payload
96 * ldb [14] ; load payload
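The commented mnemonics above are classic BPF. As a rough illustration of how such a filter is expressed from C, here is a hedged userspace sketch that performs only the first step, the "ldh [12]" ethertype load, and keeps raw PTP-over-Ethernet frames (ethertype 0x88F7); the real classifier above also handles the IPv4, IPv6, and VLAN branches:

#include <linux/filter.h>

/* Sketch: accept L2 PTP frames (ethertype 0x88F7), drop everything
 * else. Mirrors only the first "ldh [12]" step of the classifier. */
static struct sock_filter ptp_l2_filter[] = {
	/* ldh [12]            ; load ethertype */
	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
	/* jeq #0x88F7, keep, drop */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x88F7, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* keep whole packet */
	BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
};

static struct sock_fprog ptp_l2_prog = {
	.len	= sizeof(ptp_l2_filter) / sizeof(ptp_l2_filter[0]),
	.filter	= ptp_l2_filter,
};

Such a program would be attached to a packet socket with setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &ptp_l2_prog, sizeof(ptp_l2_prog)).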
/linux-4.1.27/tools/power/cpupower/bench/
H A Dbenchmark.c38 * to get the given load time
40 * @param load aimed load time in µs
45 unsigned int calculate_timespace(long load, struct config *config) calculate_timespace() argument
54 printf("calibrating load of %lius, please wait...\n", load); calculate_timespace()
63 /* approximation of the wanted load time by comparing with the calculate_timespace()
66 rounds = (unsigned int)(load * estimated / timed); calculate_timespace()
83 * generates a specific sleep and load time with the performance
101 load_time = config->load; start_benchmark()
105 total_time += _round * (config->sleep + config->load); start_benchmark()
120 * _rounds should produce a load which matches the configured start_benchmark()
121 * load time */ start_benchmark()
137 /* do some sleep/load cycles with the performance governor */ start_benchmark()
147 "load: %lius, rounds: %u\n", start_benchmark()
162 /* again, do some sleep/load cycles with the start_benchmark()
173 "load: %lius, rounds: %u\n", start_benchmark()
180 /* compare the average sleep/load cycles */ start_benchmark()
H A Dparse.h24 long load; /* load time in µs */ member in struct:config
28 * load time after every round in µs */
29 unsigned int cycles; /* calculation cycles with the same sleep/load time */
30 unsigned int rounds; /* calculation rounds with iterated sleep/load time */
H A Dmain.c34 {"load", 1, 0, 'l'},
42 {"load-step", 1, 0, 'x'},
56 printf(" -l, --load=<long int>\t\tinitial load time in us\n"); usage()
58 printf(" -x, --load-step=<long int>\ttime to be added to load time, in us\n"); usage()
110 sscanf(optarg, "%li", &config->load); main()
111 dprintf("user load time -> %s\n", optarg); main()
174 "load=%li\n\t" main()
182 config->load, main()
H A Dbenchmark.h20 /* load loop, this should take about 1 to 2ms to complete */
H A Dconfig.h20 /* initial loop count for the load calibration */
H A Dparse.c112 fprintf(output, "#round load sleep performance powersave percentage\n"); prepare_output()
130 config->load = 500000; prepare_default_config()
189 else if (strcmp("load", opt) == 0) prepare_config()
190 sscanf(val, "%li", &config->load); prepare_config()
H A Dcpufreq-bench_plot.sh79 echo "set xlabel \"sleep/load time\"" >> $dir/plot_script.gpl
89 # Parse out load time (which must be equal to sleep time for a plot), divide it by 1000
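calculate_timespace() above calibrates by timing a trial number of loop rounds and scaling proportionally, per the fragment "rounds = load * estimated / timed". A hedged user-space sketch of that calibration; the helper names are illustrative, not the bench's own:

#include <time.h>

/* Burn CPU for a given number of rounds; the body only needs to take
 * time proportional to 'rounds'. */
static void burn(unsigned int rounds)
{
	volatile unsigned long sink = 0;
	unsigned int i;

	for (i = 0; i < rounds; i++)
		sink += i;
}

static long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000L + ts.tv_nsec / 1000;
}

/* Estimate how many rounds produce 'load_us' microseconds of work:
 * time a known round count once, then scale proportionally, the same
 * approximation calculate_timespace() describes. */
static unsigned int calibrate(long load_us)
{
	const unsigned int estimated = 1000000;	/* trial round count */
	long t0 = now_us();
	long timed;

	burn(estimated);
	timed = now_us() - t0;
	if (timed <= 0)
		timed = 1;
	return (unsigned int)(load_us * (long long)estimated / timed);
}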
/linux-4.1.27/arch/arm/include/debug/
H A D8250.S22 .macro load, rd, rx:vararg
30 .macro load, rd, rx:vararg
42 1002: load \rd, [\rx, #UART_LSR << UART_SHIFT]
50 1001: load \rd, [\rx, #UART_MSR << UART_SHIFT]
H A Dexynos.S20 * aligned and add in the offset when we load the value here.
H A Ds5pv210.S19 * aligned and add in the offset when we load the value here.
/linux-4.1.27/arch/x86/include/asm/
H A Dxor_64.h17 We may also be able to load into the L1 only depending on how the cpu
18 deals with a load to a line that is being prefetched. */
H A Dmmu_context.h108 * Re-load page tables. switch_mm()
113 * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI. switch_mm()
115 * CPU 1: load from the PTE that CPU 0 writes (implicit) switch_mm()
122 * The bad outcome can occur if either CPU's load is switch_mm()
127 * store to mm_cpumask and any operation that could load switch_mm()
H A Dmach_timer.h30 * load 5 * LATCH count, (LSB and MSB) to begin countdown. mach_prepare_counter()
/linux-4.1.27/include/linux/
H A Dinitrd.h4 /* 1 = load ramdisk, 0 = don't load */
H A Dflat.h37 # define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
41 # define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
45 # define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
H A Dtorture.h73 /* Task stuttering, which forces load/no-load transitions. */
H A Delf-fdpic.h1 /* FDPIC ELF load map
H A Dfrontswap.h11 int (*load)(unsigned, pgoff_t, struct page *); member in struct:frontswap_ops
/linux-4.1.27/kernel/sched/
H A Dproc.c4 * Kernel load calculations, forked from sched/core.c
12 * Global load-average calculations
14 * We take a distributed and async approach to calculating the global load-avg
17 * The global load average is an exponentially decaying average of nr_running +
65 * get_avenrun - get the load average array
66 * @loads: pointer to dest load array
98 calc_load(unsigned long load, unsigned long exp, unsigned long active) calc_load() argument
102 newload = load * exp + active * (FIXED_1 - exp); calc_load()
103 if (active >= load) calc_load()
111 * Handle NO_HZ for the global load-average.
114 * load-average relies on per-cpu sampling from the tick, it is affected by
127 * when the window starts, thus separating old and new NO_HZ load.
288 calc_load_n(unsigned long load, unsigned long exp, calc_load_n() argument
292 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); calc_load_n()
299 * in the pending idle delta if our idle period crossed a load cycle boundary.
343 * calc_load - update the avenrun load estimates 10 ticks after the calc_global_nohz()
394 * End of global load-average stuff
399 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
403 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
404 * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
407 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
408 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
411 * degrade_zero_ticks is the number of ticks after which load at any
413 * degrade_factor is a precomputed table, a row for each load idx.
417 * row 2, col 3 (=12) says that the degradation at load idx 2 after
420 * With this power of 2 load factors, we can degrade the load n times
437 * would be when CPU is idle and so we just decay the old load without
438 * adding any new load.
441 decay_load_missed(unsigned long load, unsigned long missed_updates, int idx) decay_load_missed() argument
446 return load; decay_load_missed()
452 return load >> missed_updates; decay_load_missed()
456 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT; decay_load_missed()
461 return load; decay_load_missed()
476 /* Update our load: */ __update_cpu_load()
487 * Round up the averaging division if load is increasing. This __update_cpu_load()
488 * prevents us from getting stuck on 9 if the load is 10, for __update_cpu_load()
508 return rq->load.weight; get_rq_runnable_load()
519 * would seriously skew the load calculation. However we'll make do for those
527 * Called from nohz_idle_balance() to update the load ratings before doing the
533 unsigned long load = get_rq_runnable_load(this_rq); update_idle_cpu_load() local
537 * bail if there's load or we're actually up-to-date. update_idle_cpu_load()
539 if (load || curr_jiffies == this_rq->last_load_update_tick) update_idle_cpu_load()
545 __update_cpu_load(this_rq, load, pending_updates); update_idle_cpu_load()
565 * We were idle, this means load 0, the current load might be update_cpu_load_nohz()
579 unsigned long load = get_rq_runnable_load(this_rq); update_cpu_load_active() local
584 __update_cpu_load(this_rq, load, 1); update_cpu_load_active()
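The calc_load() fragment above (file lines 98-103) is a fixed-point exponential moving average: newload = load * exp + active * (FIXED_1 - exp), with everything scaled by FIXED_1 = 1 << FSHIFT. A stand-alone sketch completing that fragment; the constants match the kernel's published values, while the rounding comment and the usage note are reconstructions:

#define FSHIFT	11			/* bits of fractional precision */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */
#define EXP_1	1884			/* 1/exp(5s/1min) in fixed point */

static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	unsigned long newload;

	newload = load * exp + active * (FIXED_1 - exp);
	if (active >= load)
		newload += FIXED_1 - 1;	/* round up while load is rising */
	return newload / FIXED_1;
}

The three avenrun slots differ only in the decay constant (EXP_1, EXP_5, EXP_15); dividing a slot by FIXED_1 yields the familiar /proc/loadavg figure.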
H A Dfair.c97 * The exponential sliding window over which load is averaged for shares
308 /* We should have no load, but we need to update last_decay. */ list_add_leaf_cfs_rq()
603 if (unlikely(se->load.weight != NICE_0_LOAD)) calc_delta_fair()
604 delta = __calc_delta(delta, NICE_0_LOAD, &se->load); calc_delta_fair()
641 struct load_weight *load; for_each_sched_entity() local
645 load = &cfs_rq->load; for_each_sched_entity()
648 lw = cfs_rq->load; for_each_sched_entity()
650 update_load_add(&lw, se->load.weight); for_each_sched_entity()
651 load = &lw; for_each_sched_entity()
653 slice = __calc_delta(slice, se->load.weight, load); for_each_sched_entity()
675 /* Give a new task starting runnable values so it carries a heavy load in its infancy */ init_task_runnable_average()
1105 * heavily used ones, spreading the load around. should_numa_migrate_memory()
1120 unsigned long load; member in struct:numa_stats
1143 ns->load += weighted_cpuload(cpu); for_each_cpu()
1208 * The load is corrected for the CPU capacity available on each node. load_too_imbalanced()
1234 orig_src_load = env->src_stats.load; load_too_imbalanced()
1237 * In a task swap, there will be one load moving from src to dst, load_too_imbalanced()
1266 long load; task_numa_compare() local
1351 * In the overloaded case, try and keep the load balanced. task_numa_compare()
1354 load = task_h_load(env->p); task_numa_compare()
1355 dst_load = env->dst_stats.load + load; task_numa_compare()
1356 src_load = env->src_stats.load - load; task_numa_compare()
1376 load = task_h_load(cur); task_numa_compare()
1377 dst_load -= load; task_numa_compare()
1378 src_load += load; task_numa_compare()
2292 update_load_add(&cfs_rq->load, se->load.weight); account_entity_enqueue()
2294 update_load_add(&rq_of(cfs_rq)->load, se->load.weight); account_entity_enqueue()
2309 update_load_sub(&cfs_rq->load, se->load.weight); account_entity_dequeue()
2311 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight); account_entity_dequeue()
2332 tg_weight += cfs_rq->load.weight; calc_tg_weight()
2339 long tg_weight, load, shares; calc_cfs_shares() local
2342 load = cfs_rq->load.weight; calc_cfs_shares()
2344 shares = (tg->shares * load); calc_cfs_shares()
2371 update_load_set(&se->load, weight); reweight_entity()
2390 if (likely(se->load.weight == tg->shares)) update_cfs_shares()
2409 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
2506 * following representation of historical load:
2512 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2513 * approximately half as much as the contribution to load within the last ms
2642 * representation for computing load contributions.
2676 * load as a task of equal weight. __update_group_entity_contrib()
2723 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight); __update_task_entity_contrib()
2815 * Decay the load contributed by all blocked children and account this so that
2843 /* Add the load generated by se into cfs_rq's child load-average */ enqueue_entity_load_avg()
2879 /* migrated tasks did not contribute to our blocked load */ enqueue_entity_load_avg()
2887 /* we force update consideration on load-balancer moves */ enqueue_entity_load_avg()
2892 * Remove se's load from this cfs_rq child load-average, if the entity is
2901 /* we force update consideration on load-balancer moves */ dequeue_entity_load_avg()
2913 * Update the rq's load with the elapsed running time before entering
2923 * Update the rq's load with the elapsed idle time before a task is
3216 * This also mitigates buddy induced latencies under load. check_preempt_tick()
3250 * Track our maximum slice length, if the CPU's load is at set_next_entity()
3254 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { set_next_entity()
3586 * load-balance operations.
3656 if (qcfs_rq->load.weight) for_each_sched_entity()
3698 if (!cfs_rq->load.weight) unthrottle_cfs_rq()
4292 if (cfs_rq->load.weight) { for_each_sched_entity()
4300 /* avoid re-evaluating load for this entity */ for_each_sched_entity()
4333 * Return a low guess at the load of a migration-source cpu weighted
4336 * We want to under-estimate the load of migration sources, to
4351 * Return a high guess at the load of a migration-target cpu weighted
4429 * effective_load() calculates the load change as seen from the root_task_group
4431 * Adding load to a group doesn't make a group heavier, but can cause movement
4435 * Calculate the effective load difference if @wl is added (subtracted) to @tg
4451 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4461 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
4498 w = se->my_q->load.weight + wl; for_each_sched_entity()
4509 * Per the above, wl is the new se->load.weight value; since for_each_sched_entity()
4519 wl -= se->load.weight; for_each_sched_entity()
4523 * the final effective load change on the root group. Since for_each_sched_entity()
4566 s64 this_load, load; wake_affine() local
4583 load = source_load(prev_cpu, idx); wake_affine()
4588 * effect of the currently running task from the load wake_affine()
4593 weight = current->se.load.weight; wake_affine()
4596 load += effective_load(tg, prev_cpu, 0, -weight); wake_affine()
4600 weight = p->se.load.weight; wake_affine()
4603 * In low-load situations, where prev_cpu is idle and this_cpu is idle wake_affine()
4608 * Otherwise check if either cpus are near enough in load to allow this wake_affine()
4621 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight); wake_affine()
4654 unsigned long load, avg_load; find_idlest_group() local
4666 /* Tally up the load of all CPUs in the group */ find_idlest_group()
4672 load = source_load(i, load_idx); for_each_cpu()
4674 load = target_load(i, load_idx); for_each_cpu()
4676 avg_load += load; for_each_cpu()
4701 unsigned long load, min_load = ULONG_MAX; find_idlest_cpu() local
4733 load = weighted_cpuload(i); for_each_cpu_and()
4734 if (load < min_load || (load == min_load && i == this_cpu)) { for_each_cpu_and()
4735 min_load = load; for_each_cpu_and()
4821 * Balances load by selecting the idlest cpu in the idlest group, or under
4920 * Load tracking: accumulate removed load so that it can be processed migrate_task_rq_fair()
4922 * to blocked load iff they have a positive decay-count. It can never migrate_task_rq_fair()
5287 * Fair scheduling class load-balancing methods.
5291 * The purpose of load-balancing is to achieve the same basic fairness the
5335 * of load-balance at each level inv. proportional to the number of cpus in
5344 * | | `- number of cpus doing load-balance
5426 /* The set of CPUs under consideration for load-balancing */
5590 * meet load balance goals by pulling other tasks on src_cpu. can_migrate_task()
5686 * detach_tasks() -- tries to detach up to imbalance weighted load from
5695 unsigned long load; detach_tasks() local
5721 load = task_h_load(p); detach_tasks()
5723 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) detach_tasks()
5726 if ((load / 2) > env->imbalance) detach_tasks()
5733 env->imbalance -= load; detach_tasks()
5747 * weighted load. detach_tasks()
5821 /* throttled entities do not contribute to load */ __update_blocked_averages_cpu()
5871 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
5872 * This needs to be done in a top-down fashion because the load of a child
5873 * group is a fraction of its parents load.
5880 unsigned long load; update_cfs_rq_h_load() local
5899 load = cfs_rq->h_load;
5900 load = div64_ul(load * se->avg.load_avg_contrib,
5903 cfs_rq->h_load = load;
5939 unsigned long avg_load; /* Avg load across the CPUs of the group */
5940 unsigned long group_load; /* Total load over the CPUs of the group */
5941 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
5958 * during load balancing.
5963 unsigned long total_load; /* Total load of all groups in sd */
5965 unsigned long avg_load; /* Average load across all groups in sd */
5993 * get_sd_load_idx - Obtain the load index for a given sched domain.
5997 * Return: The load index.
6200 * account the variance of the tasks' load and to return true if the available
6201 * capacity is meaningful for the load balancer.
6203 * any benefit for the load balance.
6253 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
6254 * @env: The load balancing environment.
6256 * @load_idx: Load index of sched_domain of this_cpu for load calc.
6266 unsigned long load; update_sg_lb_stats() local
6276 load = target_load(i, load_idx); for_each_cpu_and()
6278 load = source_load(i, load_idx); for_each_cpu_and()
6280 sgs->group_load += load; for_each_cpu_and()
6311 * @env: The load balancing environment.
6389 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
6390 * @env: The load balancing environment.
6487 * @env: The load balancing environment.
6514 * load balancing.
6515 * @env: The load balancing environment.
6556 /* Amount of load we'd subtract */ fix_small_imbalance()
6563 /* Amount of load we'd add */ fix_small_imbalance()
6583 * groups of a given sched_domain during load balance.
6584 * @env: load balance environment
6598 * to ensure cpu-load equilibrium, look at wider averages. XXX calculate_imbalance()
6606 * max load less than avg load(as we skip the groups at or below calculate_imbalance()
6630 * want to push ourselves above the average load, nor do we wish to calculate_imbalance()
6631 * reduce the max loaded cpu below the average load. At the same time, calculate_imbalance()
6632 * we also don't want to reduce the group load below the group capacity calculate_imbalance()
6638 /* How much load to actually move to equalise the imbalance */ calculate_imbalance()
6645 * if *imbalance is less than the average load per runnable task calculate_imbalance()
6663 * Also calculates the amount of weighted load which should be moved
6666 * @env: The load balancing environment.
6681 * Compute the various statistics relevant for load balancing at find_busiest_group()
6688 /* ASYM feature bypasses nice load balance check */ find_busiest_group()
6722 * average load. find_busiest_group()
6788 * If we cannot move enough load due to this classification for_each_cpu_and()
6811 * For the load comparisons with the other cpu's, consider for_each_cpu_and()
6813 * that the load can be moved away from the cpu that is for_each_cpu_and()
6881 * to do the newly idle load balance. should_we_balance()
6902 * is eligible for doing load balancing at this and above domains. should_we_balance()
6985 * cur_ld_moved - load moved in current iteration load_balance()
6986 * ld_moved - cumulative load moved across iterations load_balance()
7019 * This changes load balance semantics a bit on who can move load_balance()
7020 * load to a given_cpu. In addition to the given_cpu itself load_balance()
7023 * load to given_cpu. In rare situations, this may cause load_balance()
7025 * _independently_ and at _same_ time to move some load to load_balance()
7026 * given_cpu) causing excess load to be moved to given_cpu. load_balance()
7028 * moreover subsequent load balance cycles should correct the load_balance()
7029 * excess load moved. load_balance()
7100 * only after active load balance is finished. load_balance()
7385 * idle load balancing details
7387 * needed, they will kick the idle load balancer, which then does idle
7388 * load balancing for all the idle CPUs.
7427 * is idle. And the softirq performing nohz idle load balance nohz_balancer_kick()
7484 * This info will be used in performing idle load balancing in the future.
7525 * This trades load-balance latency on larger machines for less cross talk.
7570 * Stop the load balance at this level. There is another for_each_domain()
7571 * CPU in our sched group which is doing load balancing more for_each_domain()
7647 * If this cpu gets work to do, stop the load balancing for_each_cpu()
7648 * work being done for other cpus. Next load for_each_cpu()
7677 * Current heuristic for kicking the idle load balancer in the presence
7706 * None are in tickless mode and hence no need for NOHZ idle load nohz_kick_needed()
7769 * give the idle cpus a chance to load balance. Else we may run_rebalance_domains()
7770 * load balance only within the local sched_domain hierarchy run_rebalance_domains()
7771 * and abort nohz_idle_balance altogether if we pull some load. run_rebalance_domains()
7778 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
7927 * Remove our load from contribution when we leave sched_fair switched_from_fair()
8150 update_load_set(&se->load, NICE_0_LOAD); init_tg_cfs_entry()
8216 if (rq->cfs.load.weight) get_rr_interval_fair()
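update_cfs_rq_h_load() (file lines 5871-5903 above) propagates load top-down: each group's h_load is its parent's h_load scaled by the group's share of the parent's runnable load. The denominator line is elided in the results above, so this one-step sketch assumes a conventional divide-by-zero guard:

#include <stdint.h>

/* One step of the top-down hierarchical-load walk: scale the parent's
 * h_load by this entity's fraction of the parent's runnable load.
 * The "+ 1" guard against a zero denominator is an assumption here;
 * the kernel performs the division with div64_ul. */
static uint64_t child_h_load(uint64_t parent_h_load,
			     uint64_t entity_load_contrib,
			     uint64_t parent_runnable_load)
{
	return parent_h_load * entity_load_contrib /
	       (parent_runnable_load + 1);
}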
/linux-4.1.27/arch/powerpc/kvm/
H A Dfpu.S34 lfd 0,0(r3); /* load up fpscr value */ \
55 lfd 0,0(r3); /* load up fpscr value */ \
78 lfd 0,0(r3); /* load up fpscr value */ \
119 lfd 2,0(r8) /* load param3 */
121 lfd 1,0(r7) /* load param2 */
123 lfd 0,0(r6) /* load param1 */
125 lfd 3,0(r3) /* load up fpscr value */
127 lwz r6, 0(r4) /* load cr */
210 lfd 1,0(r6); /* load param2 */ \
211 lfd 0,0(r5); /* load param1 */ \
212 lfd 3,0(r3); /* load up fpscr value */ \
214 lwz r6, 0(r4); /* load cr */ \
/linux-4.1.27/arch/xtensa/boot/boot-elf/
H A DMakefile24 --set-section-flags image=contents,alloc,load,load,data \
/linux-4.1.27/arch/mips/fw/arc/
H A Dsalone.c2 * Routines to load into memory and execute stand-along program images using
12 return ARC_CALL4(load, Path, TopAddr, ExecAddr, LowAddr); ArcLoad()
/linux-4.1.27/drivers/watchdog/
H A Dsp805_wdt.c61 * @load_val: load value to be set for current timeout
77 /* This routine finds the load value that will reset the system in the required timeout */ wdt_setload()
81 u64 load, rate; wdt_setload() local
89 * load is half of what should be required. wdt_setload()
91 load = div_u64(rate, 2) * timeout - 1; wdt_setload()
93 load = (load > LOAD_MAX) ? LOAD_MAX : load; wdt_setload()
94 load = (load < LOAD_MIN) ? LOAD_MIN : load; wdt_setload()
97 wdt->load_val = load; wdt_setload()
99 wdd->timeout = div_u64((load + 1) * 2 + (rate / 2), rate); wdt_setload()
109 u64 load, rate; wdt_timeleft() local
114 load = readl_relaxed(wdt->base + WDTVALUE); wdt_timeleft()
118 load += wdt->load_val + 1; wdt_timeleft()
121 return div_u64(load, rate); wdt_timeleft()
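wdt_setload() above converts a timeout in seconds to a counter value: the SP805 counts down twice per watchdog cycle, so half the clock rate covers one timeout period, minus one because the counter fires at zero, clamped to the register range. A sketch of just that arithmetic, with stand-in values for the driver's LOAD_MIN/LOAD_MAX:

#include <stdint.h>

#define LOAD_MIN 0x00000001U	/* stand-ins for the driver's limits */
#define LOAD_MAX 0xFFFFFFFFU

/* The SP805 reloads twice per watchdog cycle, so rate/2 covers one
 * timeout; the -1 accounts for the counter firing on reaching zero. */
static uint64_t wdt_load_for_timeout(uint64_t rate_hz,
				     unsigned int timeout_s)
{
	uint64_t load = (rate_hz / 2) * timeout_s - 1;

	if (load > LOAD_MAX)
		load = LOAD_MAX;
	if (load < LOAD_MIN)
		load = LOAD_MIN;
	return load;
}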
/linux-4.1.27/arch/sparc/crypto/
H A Dcrop_devid.c7 * load any modules which have device table entries that
/linux-4.1.27/arch/sh/include/asm/
H A Datomic-grb.h14 " mov.l @%1, %0 \n\t" /* load old value */ \
34 " mov.l @%1, %0 \n\t" /* load old value */ \
65 " mov.l @%1, %0 \n\t" /* load old value */ atomic_clear_mask()
84 " mov.l @%1, %0 \n\t" /* load old value */ atomic_set_mask()
H A Dbitops-grb.h18 " mov.l @%1, %0 \n\t" /* load old value */ set_bit()
41 " mov.l @%1, %0 \n\t" /* load old value */ clear_bit()
64 " mov.l @%1, %0 \n\t" /* load old value */ change_bit()
88 " mov.l @%2, %0 \n\t" /* load old value */ test_and_set_bit()
121 " mov.l @%2, %0 \n\t" /* load old value */ test_and_clear_bit()
153 " mov.l @%2, %0 \n\t" /* load old value */ test_and_change_bit()
H A Dcmpxchg-grb.h14 " mov.l @%1, %0 \n\t" /* load old value */ xchg_u32()
35 " mov.b @%1, %0 \n\t" /* load old value */ xchg_u8()
59 " mov.l @%3, %0 \n\t" /* load old value */ __cmpxchg_u32()
/linux-4.1.27/tools/perf/tests/
H A Dpython-use.c2 * Just test if we can load the python binding.
/linux-4.1.27/arch/s390/kernel/
H A Dswsusp.S249 lctlg %c0,%c15,0x380(%r13) /* load control registers */
250 lam %a0,%a15,0x340(%r13) /* load access registers */
252 lfpc 0x31c(%r13) /* load fpu control */
253 ld 0,0x200(%r13) /* load f0 */
254 ld 1,0x208(%r13) /* load f1 */
255 ld 2,0x210(%r13) /* load f2 */
256 ld 3,0x218(%r13) /* load f3 */
257 ld 4,0x220(%r13) /* load f4 */
258 ld 5,0x228(%r13) /* load f5 */
259 ld 6,0x230(%r13) /* load f6 */
260 ld 7,0x238(%r13) /* load f7 */
261 ld 8,0x240(%r13) /* load f8 */
262 ld 9,0x248(%r13) /* load f9 */
263 ld 10,0x250(%r13) /* load f10 */
264 ld 11,0x258(%r13) /* load f11 */
265 ld 12,0x260(%r13) /* load f12 */
266 ld 13,0x268(%r13) /* load f13 */
267 ld 14,0x270(%r13) /* load f14 */
268 ld 15,0x278(%r13) /* load f15 */
H A Drelocate_kernel.S58 lghi %r7,4096 # load PAGE_SIZE in r7
59 lghi %r9,4096 # load PAGE_SIZE in r9
87 la %r4,load_psw-.base(%r13) # load psw-address into the register
88 o %r3,4(%r4) # or load address into psw
H A Dhead.S10 * 1) load the image directly into ram at address 0 and do an PSW restart
11 * 2) linload will load the image from address 0x10000 to memory 0x10000
44 .long 0x02000230,0x60000050 # by ipl and load the range
108 ssch 0(%r3) # load chunk of 1600 bytes
165 l %r1,0xb8 # load ipl subchannel number
166 la %r2,IPL_BS # load start address
167 bas %r14,.Lloader # load rest of ipl image
172 # load parameter file from ipl device
176 bas %r14,.Lloader # load parameter file
190 la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
211 # load ramdisk from ipl device
216 bas %r14,.Lloader # load ramdisk
H A Dhead64.S22 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
95 lam 0,15,.Laregs-.LPG3(%r13) # load acrs needed by uaccess
101 lpswe .Ldw-.(%r13) # load disabled wait psw
H A Dentry.S102 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
134 1: lg %r15,\stack # load target stack
189 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
191 lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
192 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
298 # _CIF_ASCE is set, load user space asce
302 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
313 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
316 llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
389 lmg %r9,%r10,__PT_R9(%r11) # load gprs
447 lgf %r1,0(%r10,%r1) # load address of handler routine
599 # _CIF_ASCE is set, load user space asce
603 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
783 lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
866 0: # check if base register setup + TIF bit load has been done
1000 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
1004 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
1015 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1025 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
/linux-4.1.27/include/linux/platform_data/
H A Dbfin_rotary.h102 #define W1LCNT_ZERO (1 << 0) /* write 1 to load CNT_COUNTER with zero */
103 #define W1LCNT_MIN (1 << 2) /* write 1 to load CNT_COUNTER from CNT_MIN */
104 #define W1LCNT_MAX (1 << 3) /* write 1 to load CNT_COUNTER from CNT_MAX */
106 #define W1LMIN_ZERO (1 << 4) /* write 1 to load CNT_MIN with zero */
107 #define W1LMIN_CNT (1 << 5) /* write 1 to load CNT_MIN from CNT_COUNTER */
108 #define W1LMIN_MAX (1 << 7) /* write 1 to load CNT_MIN from CNT_MAX */
110 #define W1LMAX_ZERO (1 << 8) /* write 1 to load CNT_MAX with zero */
111 #define W1LMAX_CNT (1 << 9) /* write 1 to load CNT_MAX from CNT_COUNTER */
112 #define W1LMAX_MIN (1 << 10) /* write 1 to load CNT_MAX from CNT_MIN */
H A Dkeypad-ep93xx.h18 * @prescale: row/column counter pre-scaler load value
H A Dremoteproc-omap.h28 * @firmware: name of firmware file to load
/linux-4.1.27/drivers/isdn/sc/
H A Dshmem.c42 * determine the page to load from the address memcpy_toshmem()
47 * Block interrupts and load the page memcpy_toshmem()
81 * determine the page to load from the address memcpy_fromshmem()
88 * Block interrupts and load the page memcpy_fromshmem()
120 * determine the page to load from the address
126 * Block interrupts and load the page
/linux-4.1.27/arch/m68k/fpsp040/
H A Dsmovecr.S69 leal PIRZRM,%a0 |rmode is rz or rm, load PIRZRM in a0
72 leal PIRN,%a0 |rmode is rn, load PIRN in a0
75 leal PIRP,%a0 |rmode is rp, load PIRP in a0
84 leal SMALRZRM,%a0 |rmode is rz or rm, load SMRZRM in a0
89 leal SMALRN,%a0 |rmode is rn, load SMRN in a0
94 leal SMALRP,%a0 |rmode is rp, load SMRP in a0
105 leal BIGRZRM,%a0 |rmode is rz or rm, load BGRZRM in a0
112 leal BIGRN,%a0 |rmode is rn, load BGRN in a0
119 leal BIGRP,%a0 |rmode is rp, load SMRP in a0
129 movel %d1,L_SCR1(%a6) |load mode for round call
144 movel (%a0,%d0),FP_SCR1(%a6) |load first word to temp storage
145 movel 4(%a0,%d0),FP_SCR1+4(%a6) |load second word
146 movel 8(%a0,%d0),FP_SCR1+8(%a6) |load third word
H A Ddo_func.S98 | These routines load forced values into fp0. They are called
110 bsr ld_mzero |if neg, load neg zero, return here
118 bne ld_mzero |if neg, load neg zero
119 bra ld_pzero |load positive zero
310 beq ld_pzero |if pos then load +0
311 bra ld_mzero |else neg load -0
390 beq ld_pzero |if pos then load +0
391 bra ld_mzero |else neg load -0
448 leal pscalet,%a1 |load start of jump table
449 movel (%a1,%d1.w*4),%a1 |load a1 with label depending on tag
460 beq ld_pzero |if pos then load +0
461 bra ld_mzero |if neg then load -0
464 beq ld_pinf |if pos then load +inf
465 bra ld_minf |else neg load -inf
485 fmovex QNAN,%fp1 |load NAN
487 fmovex QNAN,%fp0 |load NAN
507 fmovex PPIBY2,%fp0 |load +pi/2
513 fmovex MPIBY2,%fp0 |load -pi/2
520 fmovex PINF,%fp0 |load +inf
527 fmovex MINF,%fp0 |load -inf
534 fmovex PONE,%fp0 |load +1
540 fmovex MONE,%fp0 |load -1
547 fmovex PZERO,%fp0 |load +0
554 fmovex MZERO,%fp0 |load -0
H A Dsint.S158 | Sign is +. If rp, load +1.0, if rm, load +0.0
161 beqs un_ldpone |if rp, load +1.0
162 bsr ld_pzero |if rm, load +0.0
168 | Sign is -. If rm, load -1.0, if rp, load -0.0
172 beqs un_ldmone |if rm, load -1.0
173 bsr ld_mzero |if rp, load -0.0
H A Dsgetem.S59 movew LOCAL_EX(%a0),%d0 |load resulting exponent into d0
73 | For normalized numbers, leave the mantissa alone, simply load
90 | then load the exponent with +/1 $3fff.
94 movel LOCAL_HI(%a0),%d0 |load ms mant in d0
95 movel LOCAL_LO(%a0),%d1 |load ls mant in d1
H A Dx_snan.S165 movel #4,%d0 |load byte count
177 movel %a0,%a1 |load dest addr into a1
178 movel %a7,%a0 |load src addr of snan into a0
187 movel #2,%d0 |load byte count
199 movel %a0,%a1 |load dest addr into a1
209 movel #1,%d0 |load byte count
220 movel %a0,%a1 |load dest addr into a1
/linux-4.1.27/arch/xtensa/boot/boot-redboot/
H A DMakefile25 --set-section-flags image=contents,alloc,load,load,data \
H A Dbootstrap.S12 * can fit in the space before the load address.
71 * load address of this image is not at an arbitrary address,
76 l32r, so we load to a4 first. */
131 # a0: load address
163 # a0: load address
/linux-4.1.27/arch/parisc/include/asm/
H A Dprefetch.h8 * PA7300LC (page 14-4 of the ERS) also implements prefetching by a load
9 * to gr0 but not in a way that Linux can use. If the load would cause an
/linux-4.1.27/arch/x86/realmode/rm/
H A Dtrampoline_32.S18 * and IP is zero. Thus, we load CS to the physical segment
48 * lgdt will not be able to load the address as in real mode default
52 lidtl tr_idt # load idt with 0, 0
53 lgdtl tr_gdt # load gdt with whatever is appropriate
H A Dtrampoline_64.S62 * lgdt will not be able to load the address as in real mode default
67 lidtl tr_idt # load idt with 0, 0
68 lgdtl tr_gdt # load gdt with whatever is appropriate
H A Dwakemain.c19 outb(0xb6, 0x43); /* Ctr 2, squarewave, load, binary */ beep()
/linux-4.1.27/arch/mips/lantiq/xway/
H A Dxrx200_phy_fw.c34 "failed to load firmware filename\n"); xway_gphy_load()
42 "failed to load firmware filename\n"); xway_gphy_load()
49 dev_err(&pdev->dev, "failed to load firmware filename\n"); xway_gphy_load()
55 dev_err(&pdev->dev, "failed to load firmware: %s\n", fw_name); xway_gphy_load()
/linux-4.1.27/arch/m32r/boot/compressed/
H A Dboot.h2 * 1. load vmlinuz
H A DMakefile13 # IMAGE_OFFSET is the load offset of the compression loader
/linux-4.1.27/tools/testing/selftests/firmware/
H A Dfw_userhelper.sh3 # to load firmware it can't find on disk itself. We must request a firmware
5 * won't find so that we can do the load ourselves manually.
29 # This will block until our load (below) has finished.
80 # Do a proper load, which should work correctly.
H A Dfw_filesystem.sh2 # This validates that the kernel will load firmware out of its list of
4 # we reset the custom load directory to a location the user helper doesn't
47 # This should succeed via kernel load or will fail after 1 second after
/linux-4.1.27/samples/bpf/
H A Dbpf_helpers.h30 unsigned long long off) asm("llvm.bpf.load.byte");
32 unsigned long long off) asm("llvm.bpf.load.half");
34 unsigned long long off) asm("llvm.bpf.load.word");
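These declarations bind C function names to LLVM's BPF load intrinsics, giving eBPF C programs the absolute packet loads that classic BPF's ldb/ldh/ldw provide. A hedged usage sketch in the style of the samples in this directory (the section name and the UDP test are illustrative):

/* Read the IPv4 protocol byte with the llvm.bpf.load.byte intrinsic
 * declared above, as samples in this directory do. */
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/ip.h>
#include "bpf_helpers.h"

SEC("socket_proto")		/* section name is illustrative */
int bpf_prog_proto(struct __sk_buff *skb)
{
	/* ldb [ETH_HLEN + protocol offset], in classic BPF terms */
	int proto = load_byte(skb,
			      ETH_HLEN + offsetof(struct iphdr, protocol));

	return proto == IPPROTO_UDP;	/* keep UDP, drop the rest */
}
char _license[] SEC("license") = "GPL";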
/linux-4.1.27/sound/
H A Dsound_firmware.c20 printk(KERN_INFO "Unable to load '%s'.\n", fn); do_mod_firmware_load()
51 * mod_firmware_load - load sound driver firmware
60 * The length of the buffer is returned on a successful load, the
/linux-4.1.27/drivers/misc/genwqe/
H A Dcard_dev.c499 * @load: details about image load
507 struct genwqe_bitstream *load) do_flash_update()
522 if ((load->size & 0x3) != 0) do_flash_update()
525 if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0) do_flash_update()
529 switch ((char)load->partition) { do_flash_update()
543 buf = (u8 __user *)load->data_addr; do_flash_update()
548 blocks_to_flash = load->size / FLASH_BLOCK; do_flash_update()
549 while (load->size) { do_flash_update()
556 tocopy = min_t(size_t, load->size, FLASH_BLOCK); do_flash_update()
586 req->__asiv[24] = load->uid; do_flash_update()
590 *(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id); do_flash_update()
591 *(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id); do_flash_update()
598 *(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24); do_flash_update()
602 *(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id); do_flash_update()
603 *(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id); do_flash_update()
616 load->retc = req->retc; do_flash_update()
617 load->attn = req->attn; do_flash_update()
618 load->progress = req->progress; do_flash_update()
631 load->size -= tocopy; do_flash_update()
644 struct genwqe_bitstream *load) do_flash_read()
658 if ((load->size & 0x3) != 0) do_flash_read()
661 if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0) do_flash_read()
665 switch ((char)load->partition) { do_flash_read()
679 buf = (u8 __user *)load->data_addr; do_flash_read()
684 blocks_to_flash = load->size / FLASH_BLOCK; do_flash_read()
685 while (load->size) { do_flash_read()
690 tocopy = min_t(size_t, load->size, FLASH_BLOCK); do_flash_read()
712 cmd->__asiv[24] = load->uid; do_flash_read()
720 *(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24); do_flash_read()
734 load->retc = cmd->retc; do_flash_read()
735 load->attn = cmd->attn; do_flash_read()
736 load->progress = cmd->progress; do_flash_read()
760 load->size -= tocopy; do_flash_read()
1149 struct genwqe_bitstream load; genwqe_ioctl() local
1157 if (copy_from_user(&load, (void __user *)arg, genwqe_ioctl()
1158 sizeof(load))) genwqe_ioctl()
1161 rc = do_flash_update(cfile, &load); genwqe_ioctl()
1163 if (copy_to_user((void __user *)arg, &load, sizeof(load))) genwqe_ioctl()
1170 struct genwqe_bitstream load; genwqe_ioctl() local
1178 if (copy_from_user(&load, (void __user *)arg, sizeof(load))) genwqe_ioctl()
1181 rc = do_flash_read(cfile, &load); genwqe_ioctl()
1183 if (copy_to_user((void __user *)arg, &load, sizeof(load))) genwqe_ioctl()
506 do_flash_update(struct genwqe_file *cfile, struct genwqe_bitstream *load) do_flash_update() argument
643 do_flash_read(struct genwqe_file *cfile, struct genwqe_bitstream *load) do_flash_read() argument
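do_flash_update() and do_flash_read() above stream the user's bitstream FLASH_BLOCK bytes at a time, advancing the user pointer and decrementing load->size per chunk. A kernel-style sketch of that chunking pattern; the FLASH_BLOCK value and the per-chunk hand-off are stand-ins:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#define FLASH_BLOCK 4096	/* stand-in; the driver defines its own */

/* Consume a user buffer FLASH_BLOCK bytes at a time; the DDCB request
 * the driver actually builds per chunk is elided here. */
static int flash_copy_chunks(u8 __user *ubuf, size_t size, u8 *kbuf)
{
	while (size) {
		size_t tocopy = min_t(size_t, size, FLASH_BLOCK);

		if (copy_from_user(kbuf, ubuf, tocopy))
			return -EFAULT;

		/* ... hand 'tocopy' bytes at kbuf to the card here ... */

		ubuf += tocopy;
		size -= tocopy;
	}
	return 0;
}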
/linux-4.1.27/drivers/remoteproc/
H A Dremoteproc_internal.h32 * @load: load firmware to memory, where the remote processor
43 int (*load)(struct rproc *rproc, const struct firmware *fw); member in struct:rproc_fw_ops
92 if (rproc->fw_ops->load) rproc_load_segments()
93 return rproc->fw_ops->load(rproc, fw); rproc_load_segments()
/linux-4.1.27/drivers/cpufreq/
H A Dcpufreq_governor.c70 unsigned int load; dbs_check_cpu() local
116 * on this CPU now, it would be unfair to calculate 'load' the dbs_check_cpu()
118 * near-zero load, irrespective of how CPU intensive that task dbs_check_cpu()
122 * To avoid this, we reuse the 'load' from the previous dbs_check_cpu()
125 * this copy, lest we get stuck at a high load (high frequency) dbs_check_cpu()
126 * for too long, even when the current system load has actually dbs_check_cpu()
142 load = j_cdbs->prev_load; dbs_check_cpu()
146 * the previous load only once, upon the first wake-up dbs_check_cpu()
151 load = 100 * (wall_time - idle_time) / wall_time; dbs_check_cpu()
152 j_cdbs->prev_load = load; dbs_check_cpu()
155 if (load > max_load) dbs_check_cpu()
156 max_load = load; dbs_check_cpu()
211 /* Will return if we need to evaluate cpu load again or not */ need_load_eval()
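dbs_check_cpu() above derives per-CPU load as the percentage of wall time spent non-idle since the last sample (file line 151), falling back to the saved prev_load when a sample is unusable so a long idle stretch is not misread as a spike. A simplified sketch of that core computation; the struct here is illustrative, not the governor's own:

/* Percentage of wall time the CPU spent non-idle since the previous
 * sample; reuse the previous value when the deltas are unusable. */
struct cpu_sample {
	unsigned int prev_load;
};

static unsigned int cpu_load_percent(struct cpu_sample *s,
				     unsigned int wall_time,
				     unsigned int idle_time)
{
	unsigned int load;

	if (!wall_time || wall_time < idle_time)
		return s->prev_load;	/* unusable sample */

	load = 100 * (wall_time - idle_time) / wall_time;
	s->prev_load = load;
	return load;
}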
/linux-4.1.27/drivers/staging/comedi/drivers/
H A Ds526.c158 data[2]: Pre-load Register Value s526_gpct_insn_config()
168 cmReg.reg.autoLoadResetRcap = 0;/* Auto load disabled */ s526_gpct_insn_config()
226 /* Auto load with INDEX^ */ s526_gpct_insn_config()
233 /* Load the pre-load register high word */ s526_gpct_insn_config()
237 /* Load the pre-load register low word */ s526_gpct_insn_config()
260 data[2]: Pre-load Register 0 Value s526_gpct_insn_config()
261 data[3]: Pre-load Register 1 Value s526_gpct_insn_config()
271 /* Load the pre-load register 0 high word */ s526_gpct_insn_config()
275 /* Load the pre-load register 0 low word */ s526_gpct_insn_config()
284 /* Load the pre-load register 1 high word */ s526_gpct_insn_config()
288 /* Load the pre-load register 1 low word */ s526_gpct_insn_config()
303 data[2]: Pre-load Register 0 Value s526_gpct_insn_config()
304 data[3]: Pre-load Register 1 Value s526_gpct_insn_config()
314 /* Load the pre-load register 0 high word */ s526_gpct_insn_config()
318 /* Load the pre-load register 0 low word */ s526_gpct_insn_config()
327 /* Load the pre-load register 1 high word */ s526_gpct_insn_config()
331 /* Load the pre-load register 1 low word */ s526_gpct_insn_config()
/linux-4.1.27/arch/alpha/lib/
H A Dmemset.S60 mskql $4,$16,$4 /* .. E1 (and possible load stall) */
87 mskqh $7,$6,$2 /* .. E1 (and load stall) */
98 mskql $1,$16,$4 /* E0 (after load stall) */
H A Dstrcat.S21 ldq_u $1, 0($16) # load first quadword (a0 may be misaligned)
H A Dmemcpy.c68 * Note the ordering to try to avoid load (and address generation) latencies.
110 * for the load-store. I don't know why, but it would seem that using a floating
114 * Note the ordering to try to avoid load (and address generation) latencies.
H A Dev67-strlen.S28 ldq_u $1, 0($16) # L : load first quadword ($16 may be misaligned)
H A Dstrlen.S24 ldq_u $1, 0($16) # load first quadword ($16 may be misaligned)
H A Dev6-stxncpy.S85 * separate store quads from load quads
166 ldq_u t1, 0(a1) # L : load first src word
196 ldq_u t2, 8(a1) # L : Latency=3 load second src word
256 ldq_u t2, 8(a1) # U : Latency=3 load high word for next time
297 and t12, 0x80, t6 # E : avoid dest word load if we can (stall)
317 and a1, 7, t6 # E : avoid final load if possible
322 ldq_u t2, 8(a1) # L : load final src word
338 ldq_u t1, 0(a1) # L : load first source word
341 /* Conditionally load the first destination word and a bytemask
H A Dstrrchr.S26 ldq_u t0, 0(a0) # .. e1 : load first quadword
44 ldq t0, 8(v0) # e0 : load next quadword
H A Dev67-strcat.S34 ldq_u $1, 0($16) # L : load first quadword (a0 may be misaligned)
H A Dstrncat.S27 ldq_u $1, 0($16) # load first quadword ($16 may be misaligned)
H A Dev6-stxcpy.S70 /* Nops here to separate store quads from load quads */
128 ldq_u t1, 0(a1) # L : load first src word
129 and a0, 7, t0 # E : take care not to load a word ...
208 ldq_u t2, 0(a1) # L : Latency=3 load high word for next time
244 and t12, 0x80, t6 # E : avoid dest word load if we can (stall)
266 ldq_u t1, 0(a1) # L : load first source word
269 /* Conditionally load the first destination word and a bytemask
H A Dstxncpy.S134 ldq_u t1, 0(a1) # e0 : load first src word
157 ldq_u t2, 8(a1) # e0 : load second src word
213 ldq_u t2, 8(a1) # e0 : load high word for next time
254 and t12, 0x80, t6 # e0 : avoid dest word load if we can
276 ldq_u t2, 8(a1) # e0 : load final src word
292 ldq_u t1, 0(a1) # e0 : load first source word
297 /* Conditionally load the first destination word and a bytemask
/linux-4.1.27/drivers/net/can/softing/
H A Dsofting_cs.c47 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
59 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
71 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
83 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
95 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
107 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
119 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
131 .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
143 .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
H A Dsofting_platform.h31 } boot, load, app; member in struct:softing_platform_data
/linux-4.1.27/include/uapi/linux/
H A Dsysinfo.h9 __kernel_ulong_t loads[3]; /* 1, 5, and 15 minute load averages */
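The loads[3] array above exports the same 1/5/15-minute averages to user space as fixed-point values scaled by SI_LOAD_SHIFT (16). A small user-space sketch that prints them as decimals:

#include <stdio.h>
#include <sys/sysinfo.h>

/* Print the kernel's fixed-point load averages as floats. */
int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si))
		return 1;
	printf("load: %.2f %.2f %.2f\n",
	       si.loads[0] / (double)(1 << SI_LOAD_SHIFT),
	       si.loads[1] / (double)(1 << SI_LOAD_SHIFT),
	       si.loads[2] / (double)(1 << SI_LOAD_SHIFT));
	return 0;
}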
H A Db1lli.h63 #define AVMB1_LOAD 0 /* load image to card */
66 #define AVMB1_LOAD_AND_CONFIG 3 /* load image and config to card */
H A Delf-fdpic.h1 /* elf-fdpic.h: FDPIC ELF load map
H A Dcycx_cfm.h57 * @codeoffs - code load offset
59 * @dataoffs - configuration data load offset
H A Dflat.h50 #define FLAT_FLAG_RAM 0x0001 /* load program entirely into RAM */
H A Dkexec.h17 * Kexec file load interface flags.
/linux-4.1.27/arch/s390/kernel/vdso32/
H A Dclock_gettime.S35 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
78 9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
88 10: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
98 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
/linux-4.1.27/net/netfilter/ipvs/
H A DMakefile5 # IPVS transport protocol load balancing support
H A Dip_vs_pe.c47 /* Lookup pe and try to load it if it doesn't exist */ ip_vs_pe_getbyname()
55 /* If pe not found, load the module and search again */ ip_vs_pe_getbyname()
H A Dip_vs_sed.c25 * A. Weinrib and S. Shenker, Greed is not enough: Adaptive load sharing
71 * We calculate the load of each dest server as follows: ip_vs_sed_schedule()
95 * Find the destination with the least load. ip_vs_sed_schedule()
H A Dip_vs_nq.c23 * A. Weinrib and S. Shenker, Greed is not enough: Adaptive load sharing
67 * We calculate the load of each dest server as follows: ip_vs_nq_schedule()
H A Dip_vs_wlc.c43 * We calculate the load of each dest server as follows: ip_vs_wlc_schedule()
67 * Find the destination with the least load. ip_vs_wlc_schedule()
/linux-4.1.27/drivers/s390/char/
H A Dsclp_diag.h15 #define SCLP_DIAG_FTP_LDFAIL 0x01U /* load failed */
33 * @ldflg: load flag (see defines above)
/linux-4.1.27/arch/mips/include/asm/
H A Dftrace.h22 #define safe_load(load, src, dst, error) \
25 "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
H A Dprefetch.h29 * VR5500 (including VR5701 and VR7701) only implement load prefetch.
/linux-4.1.27/arch/ia64/lib/
H A Dstrlen_user.S34 // string may not be 8-byte aligned. In this case we load the 8byte
48 // load, on the other hand, will cause the translation to be inserted
99 ld8.s v[1]=[src],8 // load the initial 8bytes (must speculate)
103 ld8.s w[1]=[src],8 // load next 8 bytes in 2nd pipeline
115 ld8.s v[0]=[src],8 // speculatively load next
119 ld8.s w[0]=[src],8 // speculatively load next to next
166 EX(.Lexit1, ld8 val=[base],8) // load the initial bytes
191 // We failed even on the normal load (called from exception handler)
H A Dstrlen.S30 // string may not be 8-byte aligned. In this case we load the 8byte
45 // load, on the other hand, will cause the translation to be inserted
101 ld8.s w[1]=[src],8 // speculatively load next
112 ld8.s v[0]=[src],8 // speculatively load next
116 ld8.s w[0]=[src],8 // speculatively load next to next
H A Dcopy_user.S138 // load, then restart and finish the pipleline by copying zeros
140 // If 8-byte software pipeline fails on the load, do the same as
415 // on the load.
421 // reflect where the faulty load was.
424 // When you get a fault on load, you may have valid data from
433 // We simply replace the load with a simple mov and keep the
450 // This is the case where the byte by byte copy fails on the load
467 // The following code handles only the load failures. The
469 // scheduled. So when you fail on a load, the stores corresponding
494 // The numbers on each page show the size of the load (current alignment).
529 // This allows us to assume that if we fail on a load we haven't possibly
542 // because we failed why trying to do a load, i.e. there is still
H A Ddo_csum.S22 * for the loop. Support the cases where load latency = 1 or 2.
44 // possible load latency and also to accommodate for head and tail.
86 // - Note on prefetching: it was found that under various load, i.e. ftp read/write,
121 #define ELD p[LOAD_LATENCY] // end of load
159 ld8 firstval=[first1],8 // load, ahead of time, "first1" word
163 (p9) ld8 lastval=[last] // load, ahead of time, "last" word, if needed
179 // load two back-to-back 8-byte words per loop thereafter.
194 ld8 word1[1]=[first1],8 // load an 8-byte word
/linux-4.1.27/arch/m32r/include/asm/
H A Dswitch_to.h33 " ld lr, @%5 ; load new LR \n" \
36 " ld sp, @%3 ; load new SP \n" \
H A Delf.h76 * This is used to ensure we don't load something for the wrong architecture.
126 /* This yields a string that ld.so will use to load implementation
/linux-4.1.27/arch/sparc/include/uapi/asm/
H A Dasi.h157 #define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load,
160 #define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */
162 #define ASI_QUAD_LDD_PHYS_4V 0x26 /* (4V) Physical, qword load */
163 #define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */
164 #define ASI_QUAD_LDD_PHYS_L_4V 0x2e /* (4V) Phys, qword load, l-endian */
169 #define ASI_QUAD_LDD_PHYS 0x34 /* (III+) PADDR, qword load */
174 #define ASI_QUAD_LDD_PHYS_L 0x3c /* (III+) PADDR, qw-load, l-endian */
227 #define ASI_BLK_AIUP 0x70 /* Primary, user, block load/store */
270 #define ASI_BLK_INIT_QUAD_LDD_P 0xe2 /* (NG) init-store, twin load,
273 #define ASI_BLK_INIT_QUAD_LDD_S 0xe3 /* (NG) init-store, twin load,
278 #define ASI_ST_BLKINIT_MRU_P 0xf2 /* (NG4) init-store, twin load,
282 #define ASI_ST_BLKINIT_MRU_S 0xf2 /* (NG4) init-store, twin load,
288 #define ASI_ST_BLKINIT_MRU_PL 0xfa /* (NG4) init-store, twin load,
292 #define ASI_ST_BLKINIT_MRU_SL 0xfb /* (NG4) init-store, twin load,
/linux-4.1.27/drivers/net/wireless/libertas/
H A Dfirmware.c19 lbs_deb_fw("firmware load complete, code %d\n", ret); lbs_fw_loaded()
119 * lbs_get_firmware_async - Retrieves firmware asynchronously. Can load
128 * @callback: User callback to invoke when firmware load succeeds or fails.
138 lbs_deb_fw("firmware load already in progress\n"); lbs_get_firmware_async()
150 lbs_deb_fw("Starting async firmware load\n"); lbs_get_firmware_async()
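lbs_get_firmware_async() above wraps the kernel's asynchronous firmware loader: the call returns immediately and a completion callback fires once the image is found or the lookup fails. A hedged sketch of that pattern using the standard request_firmware_nowait() API of this kernel; the device, file name, and callback body are illustrative:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

/* Completion callback: 'fw' is NULL if the load failed. */
static void my_fw_callback(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_err(dev, "firmware load failed\n");
		return;
	}
	/* ... program fw->data (fw->size bytes) into the hardware ... */
	release_firmware(fw);
}

/* Kick off the async load; returns before the firmware arrives. */
static int my_start_fw_load(struct device *dev)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "example-fw.bin", dev, GFP_KERNEL,
				       dev, my_fw_callback);
}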
/linux-4.1.27/arch/tile/lib/
H A Dcacheflush.c28 /* Force a load instruction to issue. */ force_load()
87 * Issue a load to the last cache line, which can't complete finv_buffer_remote()
90 * memory, that one load would be sufficient, but since we may finv_buffer_remote()
91 * be, we also need to back up to the last load issued to finv_buffer_remote()
103 * practice this ends up being close enough to "load from finv_buffer_remote()
110 * load in the entire range, so we just re-load them all. finv_buffer_remote()
/linux-4.1.27/include/trace/events/
H A Dkvm.h231 {1, "load"}
234 TP_PROTO(int load),
235 TP_ARGS(load),
238 __field( u32, load )
242 __entry->load = load;
245 TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
/linux-4.1.27/arch/sparc/kernel/
H A Dunaligned_32.c2 * unaligned.c: Unaligned load/store trap handling with special
24 load, /* ld, ldd, ldh, ldsh */ enumerator in enum:direction
37 return load; decode_direction()
245 printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n", kernel_unaligned_trap()
247 unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store."); kernel_unaligned_trap()
254 case load: kernel_unaligned_trap()
280 int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE; ok_for_user()
338 printk("User FPU load/store unaligned unsupported.\n"); user_unaligned_trap()
345 case load: user_unaligned_trap()
H A Dktlb.S52 /* fallthrough to TLB load */
66 * to the sun4v tlb load code. The registers are setup
73 * The sun4v TLB load wants the PTE in %g3 so we fix that
160 /* fallthrough to TLB load */
174 * to the sun4v tlb load code. The registers are setup
181 * The sun4v TLB load wants the PTE in %g3 so we fix that
H A Dled.c39 if (!timeout) { /* blink according to load */ led_blink()
98 } else if (!strcmp(buf, "load")) { led_proc_write()
H A Dwuf.S198 /* The users stack area is kosher and mapped, load the
224 * that until we actually load the window up we are free
261 * just let the load rip, then check the sfsr to see if
281 LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
282 SUN_PI_(lda [%g0] ASI_M_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/disp/
H A Ddacnv50.c72 nv_ioctl(object, "disp dac load size %d\n", size); nv50_dac_sense()
74 nv_ioctl(object, "disp dac load vers %d data %08x\n", nv50_dac_sense()
97 args->v0.load = (loadval & 0x38000000) >> 27; nv50_dac_sense()
/linux-4.1.27/arch/arm/probes/kprobes/
H A Dcheckers-thumb.c33 * Following load insns may come here: t32_check_stack()
36 * load and store. All load insns have this bit set, when t32_check_stack()
/linux-4.1.27/arch/ia64/kernel/
H A Dentry.S195 ld8 sp=[r21] // load kernel stack pointer of new task
464 ld8 r16 = [r15] // load next's stack pointer
523 // the syscall number may have changed, so re-load it and re-calculate the
536 (p6) ld8 r20=[r20] // load address of syscall entry point
558 ld8 r3=[r2] // load pt_regs.r8
732 (p6) ld4 r31=[r18] // load current_thread_info()->flags
733 ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
741 (p6) ld4 r31=[r18] // load current_thread_info()->flags
742 ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
747 ld8 r18=[r2],PT(R9)-PT(B6) // load b6
750 ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
763 ld8 r29=[r2],16 // M0|1 load cr.ipsr
764 ld8 r28=[r3],16 // M0|1 load cr.iip
768 ld8 r30=[r2],16 // M0|1 load cr.ifs
769 ld8 r25=[r3],16 // M0|1 load ar.unat
775 ld8 r30=[r2],16 // M0|1 load cr.ifs
776 ld8 r25=[r3],16 // M0|1 load ar.unat
780 ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
784 ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
785 ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc
788 ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage)
789 ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates
792 ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr
793 ld8.fill r1=[r3],16 // M0|1 load r1
872 (p6) ld4 r31=[r17] // load current_thread_info()->flags
881 ld8 r28=[r2],8 // load b6
889 ld8 r15=[r30] // load ar.ccv
892 ld8 r29=[r2],16 // load b7
893 ld8 r30=[r3],16 // load ar.csd
896 ld8 r31=[r2],16 // load ar.ssd
963 ld8 r29=[r16],16 // load cr.ipsr
964 ld8 r28=[r17],16 // load cr.iip
966 ld8 r30=[r16],16 // load cr.ifs
967 ld8 r25=[r17],16 // load ar.unat
969 ld8 r26=[r16],16 // load ar.pfs
970 ld8 r27=[r17],16 // load ar.rsc
973 ld8 r24=[r16],16 // load ar.rnat (may be garbage)
974 ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
976 ld8 r31=[r16],16 // load predicates
977 ld8 r21=[r17],16 // load b0
979 ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
980 ld8.fill r1=[r17],16 // load r1
1060 mov ar.rsc=r19 // load ar.rsc to be used for "loadrs"
1202 ld8 r3=[r2] // load pt_regs.r8
1254 ld8 r9=[sp] // load new unat from sigscratch->scratch_unat
1296 ld8 r9=[sp] // load new ar.unat
H A Dunaligned.c75 * load/store so we can use [35:32] instead, which gives the following
77 * checking the m-bit until later in the load/store emulation.
108 * a load/store of this form.
150 * a load/store of this form.
492 * We need to clear the corresponding UNAT bit to fully emulate the load setreg()
679 printk(KERN_ERR "%s: register update on speculative load, error\n", __func__); emulate_load_updates()
680 if (die_if_kernel("unaligned reference on speculative load with register update\n", emulate_load_updates()
724 * (where the load does not happen) emulate_load_updates()
726 * The way the load algorithm works, we know that r3 does not emulate_load_updates()
765 * ldX.a we will emulate load and also invalidate the ALAT entry. emulate_load_int()
787 * ldX.acq (ordered load): emulate_load_int()
790 * ldX.c.clr (check load and clear): emulate_load_int()
792 * Therefore the operation reverts to a normal load emulate_load_int()
794 * ldX.c.nc (check load no clear): emulate_load_int()
797 * ldX.c.clr.acq (ordered check load and clear): emulate_load_int()
798 * - same as above for c.clr part. The load needs to have acquire semantics. So emulate_load_int()
801 * ldX.a (advanced load): emulate_load_int()
804 * possibly need more than one load to get the result. emulate_load_int()
806 * The load part can be handled just like a normal load, however the difficult emulate_load_int()
808 * in the base address of the load & size. To do that, a ld.a must be executed, emulate_load_int()
812 * which would overlap within [r3,r3+X] (the size of the load was store in the emulate_load_int()
832 * execute exactly the same kind of load. You could do it from an aligned emulate_load_int()
835 * So no matter what, it is not possible to emulate an advanced load emulate_load_int()
838 * We will always convert ld.a into a normal load with ALAT invalidated. This emulate_load_int()
842 * If there is a store after the advanced load, one must either do a ld.c.* or emulate_load_int()
846 * - ld.c.*, if the entry is not present a normal load is executed emulate_load_int()
849 * In either case, the load can be potentially retried in another form. emulate_load_int()
856 * when the load has the .acq completer then emulate_load_int()
863 * invalidate ALAT entry in case of advanced load emulate_load_int()
1108 printk(KERN_ERR "%s: register update on speculative load pair, error\n", emulate_load_floatpair()
1396 * load/store: ia64_handle_unaligned()
1415 * to let the load fail. ia64_handle_unaligned()
H A Defi_stub.S48 ld8 r2=[in0],8 // load EFI function's entry point
56 ld8 gp=[in0] // load EFI function's global pointer
H A Desi_stub.S50 ld8 r2=[in0],8 // load ESI function's entry point
73 ld8 gp=[in0] // load ESI function's global pointer
/linux-4.1.27/drivers/media/i2c/cx25840/
H A Dcx25840-firmware.c43 MODULE_PARM_DESC(firmware, "Firmware image to load");
88 v4l_err(client, "firmware %s load failed\n", check_fw_load()
101 v4l_err(client, "firmware load i2c failure\n"); fw_write()
163 /* Restore GPIO configuration after f/w load */ cx25840_loadfw()
/linux-4.1.27/arch/x86/purgatory/
H A Dsetup-x86_64.S22 /* load the data segments */
/linux-4.1.27/arch/microblaze/kernel/
H A Dmcount.S112 addik r5, r1, 120; /* MS: load parent addr */
113 addik r6, r15, 0; /* MS: load current function addr */
135 lwi r6, r1, 120; /* MS: load parent addr */
136 addik r5, r15, -4; /* MS: load current function addr */
H A Dheartbeat.c35 * load. It goes through the points f(0)=126, f(1)=86, microblaze_heartbeat()
/linux-4.1.27/arch/powerpc/platforms/powernv/
H A Dopal-tracepoints.c21 * enabled via a single load.
/linux-4.1.27/arch/s390/kernel/vdso64/
H A Dclock_gettime.S36 0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
62 3: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
72 4: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
82 5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
H A Dgettimeofday.S29 lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
/linux-4.1.27/arch/powerpc/kernel/
H A Dvmlinux.lds.S19 ELF file with all segments at load address 0 as input. This
23 into the "notes" segment (at a non-zero load address).
27 non-zero load address. It's not enough to always create the
28 "notes" segment, since if nothing gets assigned to it, its load
/linux-4.1.27/arch/arm64/include/asm/
H A Dspinlock.h25 * The memory barriers are implicit with the load-acquire and store-release
51 * unlock before the exclusive load. arch_spin_lock()
118 * The memory barriers are implicit with the load-acquire and store-release
173 * The memory barriers are implicit with the load-acquire and store-release
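
The spinlock.h excerpts above all lean on the same idea: when lock and unlock are built from load-acquire and store-release operations, no explicit barrier instructions are needed. A minimal sketch in portable C11, not the kernel's arm64 implementation (sketch_spinlock_t and the function names are invented for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct { atomic_bool locked; } sketch_spinlock_t;

    static void sketch_spin_lock(sketch_spinlock_t *l)
    {
            /* acquire: loads/stores after this cannot be reordered above it */
            while (atomic_exchange_explicit(&l->locked, true,
                                            memory_order_acquire))
                    ;       /* spin until the exchange observes "unlocked" */
    }

    static void sketch_spin_unlock(sketch_spinlock_t *l)
    {
            /* release: loads/stores before this cannot be reordered below it */
            atomic_store_explicit(&l->locked, false, memory_order_release);
    }
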
/linux-4.1.27/Documentation/trace/
H A Dfunction-graph-fold.vim4 " -S option to load from the command-line together with a trace. You can then
/linux-4.1.27/sound/pci/mixart/
H A Dmixart_hwdep.h70 #define MIXART_PSEUDOREG_PERF_STREAM_LOAD_OFFSET MIXART_PSEUDOREG+0x70 /* streaming load */
71 #define MIXART_PSEUDOREG_PERF_SYSTEM_LOAD_OFFSET MIXART_PSEUDOREG+0x78 /* system load (reference)*/
72 #define MIXART_PSEUDOREG_PERF_MAILBX_LOAD_OFFSET MIXART_PSEUDOREG+0x7C /* mailbox load */
73 #define MIXART_PSEUDOREG_PERF_INTERR_LOAD_OFFSET MIXART_PSEUDOREG+0x74 /* interrupt handling load */
/linux-4.1.27/tools/perf/scripts/perl/Perf-Trace-Util/lib/Perf/Trace/
H A DContext.pm23 XSLoader::load('Perf::Trace::Context', $VERSION);
/linux-4.1.27/drivers/clocksource/
H A Dscx200_hrt.c28 module_param(mhz27, int, 0); /* load time only */
32 module_param(ppm, int, 0); /* load time only */
H A Dnomadik-mtu.c108 /* Timer: configure load and background-load, and fire it up */ nmdk_clkevt_reset()
138 /* load some high default value */ nmdk_clkevt_mode()
151 /* ClockSource: configure load and background-load, and fire it up */ nmdk_clksrc_reset()
/linux-4.1.27/arch/sh/lib64/
H A Dstrcpy.S43 // r22 < r23 : Need to do a load from the destination.
44 // r22 == r23 : Doesn't actually need to load from destination,
/linux-4.1.27/arch/unicore32/include/asm/
H A Delf.h47 * This yields a string that ld.so will use to load implementation
60 * This is used to ensure we don't load something for the wrong architecture.
/linux-4.1.27/arch/openrisc/include/asm/
H A Delf.h26 * This is used to ensure we don't load something for the wrong architecture.
56 /* This yields a string that ld.so will use to load implementation
/linux-4.1.27/arch/powerpc/crypto/
H A Daes-spe-core.S28 evlwwsplat out,off(rT0); /* load word high */
31 lwz out,off(rT0); /* load word low */
34 lbz out,off(tab); /* load byte */
37 EAD(in, bpos) /* calc addr + load word high */ \
41 EAD(in, bpos) /* calc addr + load word low */ \
45 EAD(in, bpos) /* calc addr + load enc byte */ \
49 LBZ(out, rT0, 8) /* load enc byte */
52 DAD(in, bpos) /* calc addr + load dec byte */ \
/linux-4.1.27/arch/avr32/include/asm/
H A Delf.h65 * This is used to ensure we don't load something for the wrong architecture.
96 /* This yields a string that ld.so will use to load implementation
/linux-4.1.27/arch/c6x/include/asm/
H A Delf.h29 * This is used to ensure we don't load something for the wrong architecture.
74 /* This yields a string that ld.so will use to load implementation
/linux-4.1.27/arch/cris/include/arch-v32/arch/hwregs/
H A Ddma.h106 // load: g,c,d:burst
114 // load: c,d:burst
/linux-4.1.27/arch/arm/mach-mvebu/
H A Dpmsu_ll.S62 ldr r0, [r0] @ load the address of the
64 ldr r0, [r0] @ load the value in the
/linux-4.1.27/arch/arc/include/asm/
H A Delf.h36 * -we don't load something for the wrong architecture.
69 * This yields a string that ld.so will use to load implementation
H A Dspinlock.h41 * ACQUIRE barrier to ensure load/store after taking the lock arch_spin_lock()
45 * ARCv2 only has load-load, store-store and all-all barrier arch_spin_lock()
/linux-4.1.27/security/tomoyo/
H A Dload_policy.c71 * tomoyo_load_policy - Run external policy loader to load policy.
98 printk(KERN_INFO "Calling %s to load policy. Please wait.\n", tomoyo_load_policy()
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
H A Dcalib.c278 * Wait for load to complete, should be fast, a few 10s of us. ath9k_hw_loadnf()
280 * since 250us often results in NF load timeout and causes deaf ath9k_hw_loadnf()
291 * We timed out waiting for the noisefloor to load, probably due to an ath9k_hw_loadnf()
292 * in-progress rx. Simply return here and allow the load plenty of time ath9k_hw_loadnf()
294 * trying to load -50 (which happens below) while the previous load is ath9k_hw_loadnf()
301 "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n", ath9k_hw_loadnf()
/linux-4.1.27/arch/arm/mach-ep93xx/
H A Dcrunch-bits.S146 teq r0, #0 @ anything to load?
151 cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word
154 cfldr32 mvfx0, [r0, #CRUNCH_MVAX0L] @ load 72b accumulators
179 cfldr64 mvdx0, [r0, #CRUNCH_MVDX0] @ load 64b registers
233 mov r0, #0 @ nothing to load
276 mov r0, #0 @ nothing to load
307 1: @ this task owns crunch regs -- load them directly
/linux-4.1.27/arch/x86/crypto/
H A Dcrct10dif-pcl-asm_64.S90 # load the initial crc value
314 # load the shift constant
382 # now if there is, load the constants
387 movdqu (arg2), %xmm7 # load the plaintext
419 movdqu (arg2), %xmm7 # load the plaintext
430 # use stack space to load data less than 16 bytes, zero-out
445 # load 8 Bytes
456 # load 4 Bytes
467 # load 2 Bytes
477 # load 1 Byte
506 # load 3 Bytes
527 # load 2 Bytes
543 # load 1 Byte
/linux-4.1.27/arch/xtensa/kernel/
H A Dalign.S130 #define OP0_L32I_N 0x8 /* load immediate narrow */
185 rsr a8, excvaddr # load unaligned memory address
187 /* Now, identify one of the following load/store instructions.
204 rsr a7, epc1 # load exception address
208 l32i a4, a3, 0 # load 2 words
214 /* Analyze the instruction (load or store?). */
390 l32i a5, a4, 0 # load lower address word
476 l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler
482 l32i a0, a0, EXC_TABLE_FAST_USER # load handler
/linux-4.1.27/include/linux/regulator/
H A Dconsumer.h23 * to use most efficient operating mode depending upon voltage and load and
27 * IO and 1mA at idle. Device z draws 100mA when under load and 5mA when
31 * in normal mode for loads > 10mA and in IDLE mode for load <= 10mA.
48 * output load. This allows further system power savings by selecting the
49 * best (and most efficient) regulator mode for a desired load.
58 * FAST Regulator can handle fast changes in its load.
60 * load can quickly increase with CPU frequency increases.
69 * to handle fast load switching.
74 * the most noisy and may not be able to handle fast load
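
The consumer.h excerpts describe how a consumer's declared load lets the regulator core pick the most efficient operating mode (e.g. IDLE for loads <= 10mA, NORMAL above). A hedged consumer-side sketch, assuming the regulator_set_load() consumer call available in this tree (the function name sketch_dev_set_busy and the 100mA/5mA figures are invented for illustration):

    #include <linux/regulator/consumer.h>

    /* Report our expected draw so the core can re-evaluate the mode. */
    static int sketch_dev_set_busy(struct regulator *reg, bool busy)
    {
            /* load is given in uA: 100 mA when busy, 5 mA when idle */
            return regulator_set_load(reg, busy ? 100000 : 5000);
    }
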
/linux-4.1.27/drivers/media/pci/cx18/
H A Dcx18-av-firmware.c64 CX18_ERR_DEV(sd, "verification of %s firmware load " cx18_av_verifyfw()
73 CX18_INFO_DEV(sd, "verified load of %s firmware (%d bytes)\n", cx18_av_verifyfw()
93 /* The firmware load often has byte errors, so allow for several cx18_av_loadfw()
94 retries, both at byte level and at the firmware load level. */ cx18_av_loadfw()
140 CX18_ERR_DEV(sd, "unable to load firmware %s\n", FWFILE); cx18_av_loadfw()
/linux-4.1.27/drivers/media/usb/pvrusb2/
H A Dpvrusb2-devattr.h98 /* List of additional client modules we need to load */
101 /* List of defined client modules we need to load */
105 FX2 firmware check / load is skipped and we assume the device
116 Note: This is ignored if overridden on the module load line via
144 /* If set, we don't bother trying to load cx23416 firmware. */
/linux-4.1.27/drivers/crypto/vmx/
H A Dghashp8-ppc.pl63 lvx_u $H,0,r4 # load H
111 lvx_u $IN,0,$Xip # load Xi
113 lvx_u $Hl,r8,$Htbl # load pre-computed table
159 lvx_u $Xl,0,$Xip # load Xi
161 lvx_u $Hl,r8,$Htbl # load pre-computed table
H A Daesp8-ppc.pl493 lvx $ivec,0,$ivp # load [unaligned] iv
503 lvsr $inpperm,0,r11 # prepare for unaligned load
697 lvx $rndkey0,$x00,$key # load key schedule
709 stvx v24,$x00,$key_ # off-load round[1]
712 stvx v25,$x10,$key_ # off-load round[2]
719 stvx v24,$x00,$key_ # off-load round[3]
722 stvx v25,$x10,$key_ # off-load round[4]
733 lvx v24,$x00,$key_ # pre-load round[1]
735 lvx v25,$x10,$key_ # pre-load round[2]
742 lvx_u $in0,$x00,$inp # load first 8 "words"
849 lvx v24,$x00,$key_ # re-pre-load round[1]
859 lvx v25,$x10,$key_ # re-pre-load round[2]
880 lvx_u $in0,$x00,$inp # load next input block
1249 lvx $ivec,0,$ivp # load [unaligned] iv
1261 lvsr $inpperm,0,r11 # prepare for unaligned load
1400 lvx $rndkey0,$x00,$key # load key schedule
1412 stvx v24,$x00,$key_ # off-load round[1]
1415 stvx v25,$x10,$key_ # off-load round[2]
1422 stvx v24,$x00,$key_ # off-load round[3]
1425 stvx v25,$x10,$key_ # off-load round[4]
1436 lvx v24,$x00,$key_ # pre-load round[1]
1438 lvx v25,$x10,$key_ # pre-load round[2]
1522 lvx v24,$x00,$key_ # re-pre-load round[1]
1534 lvx v25,$x10,$key_ # re-pre-load round[2]
1537 lvx_u $in0,$x00,$inp # load input
/linux-4.1.27/arch/mips/mm/
H A Dtlbex.c50 * TLB load/store/modify handlers.
421 uasm_i_srl(&p, K0, K0, 22); /* load delay */ build_r3000_tlb_refill_handler()
426 uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ build_r3000_tlb_refill_handler()
429 uasm_i_nop(&p); /* load delay */ build_r3000_tlb_refill_handler()
740 UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */ build_huge_update_entries()
747 UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */ build_huge_update_entries()
899 * to mimic that here by taking a load/istream page build_get_pgd_vmalloc64()
983 * in a different cacheline or a load instruction, probably any build_get_ptep()
1014 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ build_update_entries()
1018 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ build_update_entries()
1021 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ build_update_entries()
1056 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ build_update_entries()
1062 UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */ build_update_entries()
1069 UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */ build_update_entries()
1189 /* Adjust the context during the load latency. */ build_fast_tlb_refill_handler()
1195 * In the LWX case we don't want to do the load in the build_fast_tlb_refill_handler()
1220 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ build_fast_tlb_refill_handler()
1224 UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */ build_fast_tlb_refill_handler()
1227 UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */ build_fast_tlb_refill_handler()
1707 * R3000 style TLB load/store/modify handlers.
1757 uasm_i_srl(p, pte, pte, 22); /* load delay */ build_r3000_tlbchange_handler_head()
1762 uasm_i_andi(p, pte, pte, 0xffc); /* load delay */ build_r3000_tlbchange_handler_head()
1765 uasm_i_tlbp(p); /* load delay */ build_r3000_tlbchange_handler_head()
1781 uasm_i_nop(&p); /* load delay */ build_r3000_tlb_load_handler()
1790 panic("TLB load handler fastpath space exceeded"); build_r3000_tlb_load_handler()
1793 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", build_r3000_tlb_load_handler()
1812 uasm_i_nop(&p); /* load delay */ build_r3000_tlb_store_handler()
1843 uasm_i_nop(&p); /* load delay */ build_r3000_tlb_modify_handler()
1863 * R4000 style TLB load/store/modify handlers.
1993 /* load it in the delay slot*/ build_r4000_tlb_load_handler()
1995 /* load it if ptr is odd */ build_r4000_tlb_load_handler()
2060 /* load it in the delay slot*/ build_r4000_tlb_load_handler()
2062 /* load it if ptr is odd */ build_r4000_tlb_load_handler()
2101 panic("TLB load handler fastpath space exceeded"); build_r4000_tlb_load_handler()
2104 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n", build_r4000_tlb_load_handler()
/linux-4.1.27/arch/powerpc/include/asm/
H A Delf.h18 * This is used to ensure we don't load something for the wrong architecture.
67 /* This yields a string that ld.so will use to load implementation
76 * Power6 machine). ELF_BASE_PLATFORM allows ld.so to load libraries
H A Dpte-8xx.h20 * These will get masked from the level 2 descriptor at TLB load time, and
24 * load the PMD into MD_TWC. The 8M pages are only used for kernel
/linux-4.1.27/drivers/isdn/pcbit/
H A Dpcbit.h131 #define PCBIT_IOCTL_STRLOAD 0x03 /* start load mode */
132 #define PCBIT_IOCTL_ENDLOAD 0x04 /* end load mode */
/linux-4.1.27/drivers/media/usb/go7007/
H A Dgo7007-loader.c80 "unable to load firmware from file \"%s\"\n", fw1); go7007_loader_probe()
95 "unable to load firmware from file \"%s\"\n", fw2); go7007_loader_probe()
/linux-4.1.27/arch/sparc/include/asm/
H A Delf_32.h96 * This is used to ensure we don't load something for the wrong architecture.
125 /* This yields a string that ld.so will use to load implementation
/linux-4.1.27/arch/tile/mm/
H A Delf.c74 * Notify simulator of an ET_DYN object so we know the load address. notify_exec()
163 /* Report the interpreter's load address. */ elf_plat_init()
/linux-4.1.27/arch/microblaze/include/uapi/asm/
H A Delf.h24 * This is used to ensure we don't load something for the wrong architecture.
95 /* This yields a string that ld.so will use to load implementation
/linux-4.1.27/arch/mips/boot/compressed/
H A DMakefile65 --set-section-flags=.image=contents,alloc,load,readonly,data
69 # Calculate the load address of the compressed kernel image
/linux-4.1.27/arch/mips/include/asm/mach-ip27/
H A Dkernel-entry-init.h49 or t1, t1, t0 # Physical load address of kernel text
50 or t2, t2, t0 # Physical load address of kernel data
/linux-4.1.27/drivers/usb/misc/
H A Disight_firmware.c51 printk(KERN_ERR "Unable to load isight firmware\n"); isight_firmware_load()
96 "Failed to load isight firmware\n"); isight_firmware_load()
/linux-4.1.27/arch/nios2/include/asm/
H A Delf.h25 * This is used to ensure we don't load something for the wrong architecture.
95 /* This yields a string that ld.so will use to load implementation
/linux-4.1.27/arch/powerpc/boot/
H A Dmktree.c32 uint32_t bb_debug_flag; /* Run debugger or image after load */
50 fprintf(stderr, "usage: %s <zImage-file> <boot-image> <load address> <entry point>\n",argv[0]); main()
/linux-4.1.27/arch/blackfin/include/asm/
H A Dcplb.h114 /* CSYNC to ensure load store ordering */ _disable_cplb()
134 /* CSYNC to ensure load store ordering */ _enable_cplb()
/linux-4.1.27/arch/ia64/include/asm/
H A Dasmmacro.h68 * Mark instructions that need a load of a virtual address patched to be
69 * a load of a physical address. We use this either in critical performance
/linux-4.1.27/arch/m68k/include/asm/
H A Delf.h46 * This is used to ensure we don't load something for the wrong architecture.
110 /* This yields a string that ld.so will use to load implementation
/linux-4.1.27/arch/metag/include/asm/
H A Delf.h64 * This is used to ensure we don't load something for the wrong architecture.
97 /* This yields a string that ld.so will use to load implementation
/linux-4.1.27/arch/arm/nwfpe/
H A Dfpa11.c120 /* Emulate load/store opcodes. */ EmulateAll()
121 /* Emulate load/store multiple opcodes. */ EmulateAll()
H A Dfpopcode.h50 FIX (arithmetic followed by load/store)
51 FLT (load/store followed by arithmetic)
60 L load/store bit: 0 = store, 1 = load
180 === Definitions for load and store instructions
190 /* masks for load/store */
209 /* Tests for specific data transfer load/store opcodes. */
/linux-4.1.27/arch/arm/boot/bootp/
H A Dinit.S23 _start: add lr, pc, #-0x8 @ lr = current load addr
26 add r4, r4, lr @ r4 = initrd_start + load addr
/linux-4.1.27/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/
H A DEventClass.py16 EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
21 # the size of raw buffer, raw PEBS event with load latency data's
/linux-4.1.27/drivers/usb/host/
H A Dohci-q.c106 * does some load balancing; returns the branch, or negative errno.
108 static int balance (struct ohci_hcd *ohci, int interval, int load) balance() argument
120 if (branch < 0 || ohci->load [branch] > ohci->load [i]) { balance()
125 if ((ohci->load [j] + load) > 900) balance()
148 ed, ed->branch, ed->load, ed->interval); periodic_link()
175 ohci->load [i] += ed->load; periodic_link()
177 ohci_to_hcd(ohci)->self.bandwidth_allocated += ed->load / ed->interval; periodic_link()
247 branch = balance (ohci, ed->interval, ed->load); ed_schedule()
250 "ERR %d, interval %d msecs, load %d\n", ed_schedule()
251 branch, ed->interval, ed->load); ed_schedule()
285 ohci->load [i] -= ed->load; periodic_unlink()
287 ohci_to_hcd(ohci)->self.bandwidth_allocated -= ed->load / ed->interval; periodic_unlink()
291 ed, ed->branch, ed->load, ed->interval); periodic_unlink()
456 ed->load = usb_calc_bus_time ( ed_get()
H A Disp116x-hcd.c331 isp116x->load[i] -= ep->load;
335 ep->load / ep->period;
483 u16 load = 0; start_atl_transfers() local
503 if ((load = isp116x->load[index])) { start_atl_transfers()
528 len = (MAX_LOAD_LIMIT - load) / byte_time; start_atl_transfers()
549 load += len * byte_time; start_atl_transfers()
550 if (load > MAX_LOAD_LIMIT) start_atl_transfers()
650 static int balance(struct isp116x *isp116x, u16 period, u16 load) balance() argument
657 if (branch < 0 || isp116x->load[branch] > isp116x->load[i]) { balance()
661 if ((isp116x->load[j] + load) balance()
755 ep->load = usb_calc_bus_time(udev->speed, isp116x_urb_enqueue()
782 ep->branch = ret = balance(isp116x, ep->period, ep->load); isp116x_urb_enqueue()
808 isp116x->load[i] += ep->load; isp116x_urb_enqueue()
810 hcd->self.bandwidth_allocated += ep->load / ep->period; isp116x_urb_enqueue()
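
Both HCD excerpts above perform the same periodic-schedule balancing: choose the branch (frame slot) with the least accumulated load, but only if every frame that slot maps to stays within the per-frame budget (900 in ohci-q.c's units). A standalone sketch of that search, with NUM_SLOTS and MAX_BUDGET standing in for the drivers' own constants:

    #define NUM_SLOTS  32           /* periodic schedule size, as in ohci */
    #define MAX_BUDGET 900          /* per-frame load limit */

    static int sketch_balance(const int *slot_load, int interval, int load)
    {
            int i, j, branch = -1;

            for (i = 0; i < interval; i++) {
                    /* only consider slots lighter than the best so far */
                    if (branch >= 0 && slot_load[branch] <= slot_load[i])
                            continue;
                    /* reject the slot if any frame would blow the budget */
                    for (j = i; j < NUM_SLOTS; j += interval)
                            if (slot_load[j] + load > MAX_BUDGET)
                                    break;
                    if (j >= NUM_SLOTS)
                            branch = i;
            }
            return branch;          /* negative means no slot fits */
    }
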
/linux-4.1.27/drivers/hwmon/
H A Dlm75.h24 we support. As the user is unlikely to load more than one driver
/linux-4.1.27/drivers/misc/sgi-xp/
H A Dxp_nofault.S11 * and attempts to load and consume a value from it. This function
/linux-4.1.27/drivers/media/tuners/
H A Dtuner-xc2028.h16 #define XC3028_FE_DEFAULT 0 /* Don't load SCODE */
/linux-4.1.27/arch/x86/mm/kmemcheck/
H A Dopcode.c35 * load/store that caused our #PF and this should work for all the opcodes
/linux-4.1.27/arch/x86/platform/intel-mid/device_libs/
H A Dplatform_max7315.c36 /* we have several max7315 on the board, we only need to load several max7315_platform_data()
/linux-4.1.27/arch/xtensa/variants/dc233c/include/variant/
H A Dtie-asm.h130 * continue If macro invoked as part of a larger load sequence, set to 1
133 * in sequence) at which to load. Defaults to next available space
135 * select Select what category(ies) of registers to load, as a bitmask
139 * the corresponding registers is skipped without doing any load.
/linux-4.1.27/arch/sh/mm/
H A Dgup.c24 * taking any locks. For this we would like to load the pointers gup_get_pte()
42 * We must ensure here that the load of pte_low sees l iff pte_high gup_get_pte()
43 * sees h. We load pte_high *after* loading pte_low, which ensures we gup_get_pte()
52 * very careful -- it does not atomically load the pte or anything that gup_get_pte()
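
The gup.c excerpt describes a lockless load of a two-word pte: read the low word, then the high word, then re-check the low word and retry if it changed, with barriers ordering the three loads so the two halves are observed consistently. A hedged sketch of that pattern, assuming a PAE-style pte_t with pte_low/pte_high halves:

    static pte_t sketch_gup_get_pte(pte_t *ptep)
    {
            pte_t pte;

    retry:
            pte.pte_low  = ptep->pte_low;
            smp_rmb();                 /* low-word load before high-word load */
            pte.pte_high = ptep->pte_high;
            smp_rmb();                 /* high-word load before the re-check */
            if (unlikely(pte.pte_low != ptep->pte_low))
                    goto retry;        /* torn read: the pte changed under us */

            return pte;
    }
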
/linux-4.1.27/arch/sh/boot/compressed/
H A DMakefile17 # IMAGE_OFFSET is the load offset of the compression loader
/linux-4.1.27/arch/parisc/kernel/
H A Dreal2.S78 /* load up the arg registers from the saved arg area */
172 rsm PSW_SM_Q,%r0 /* disable Q & I bits to load iia queue */
206 rsm PSW_SM_Q,%r0 /* disable Q bit to load iia queue */
252 /* load up the arg registers from the saved arg area */
/linux-4.1.27/arch/cris/include/arch-v32/arch/
H A Delf.h9 * This is used to ensure we don't load something for the wrong architecture.
/linux-4.1.27/arch/frv/include/asm/
H A Dmem-layout.h35 * the slab must be aligned such that load- and store-double instructions don't
/linux-4.1.27/arch/arm/mach-omap1/
H A Dfpga.h44 /* cpu0 load-meter LEDs */
/linux-4.1.27/Documentation/prctl/
H A Ddisable-tsc-ctxt-sw-stress-test.c7 * Warning: this test will cause a very high load for a few seconds
H A Ddisable-tsc-on-off-stress-test.c7 * Warning: this test will cause a very high load for a few seconds
/linux-4.1.27/arch/alpha/boot/
H A Dbootp.c111 load(unsigned long dst, unsigned long src, unsigned long count) load() function
201 load(initrd_start, KERNEL_ORIGIN+KERNEL_SIZE, INITRD_IMAGE_SIZE); start_kernel()
203 load(START_ADDR+(4*KERNEL_SIZE), KERNEL_ORIGIN, KERNEL_SIZE); start_kernel()
204 load(START_ADDR, START_ADDR+(4*KERNEL_SIZE), KERNEL_SIZE); start_kernel()
/linux-4.1.27/net/bridge/netfilter/
H A Dnf_log_bridge.c71 /* Request to load the real packet loggers. */ nf_log_bridge_init()
/linux-4.1.27/net/netfilter/
H A Dxt_state.c48 pr_info("cannot load conntrack support for proto=%u\n", state_mt_check()
/linux-4.1.27/security/apparmor/
H A DMakefile37 # required by policy load to map policy ordering of RLIMITs to internal
/linux-4.1.27/tools/perf/
H A Dbuiltin-mem.c201 MEM_OPT("load", MEM_OPERATION_LOAD),
275 * default to both load and store sampling cmd_mem()
281 "type", "memory operations(load,store) Default load,store", cmd_mem()
/linux-4.1.27/include/asm-generic/
H A Dpreempt.h64 * Because of load-store architectures cannot do per-cpu atomic __preempt_count_dec_and_test()
/linux-4.1.27/sound/soc/codecs/
H A Dsigmadsp-i2c.c69 * @firmware_name: Name of the firmware file to load
H A Dsigmadsp-regmap.c34 * @firmware_name: Name of the firmware file to load
/linux-4.1.27/fs/hfsplus/
H A Dsuper.c404 pr_err("unable to load nls for utf8\n"); hfsplus_fill_super()
448 /* Set up operations so we can load metadata */ hfsplus_fill_super()
471 pr_err("failed to load extents file\n"); hfsplus_fill_super()
476 pr_err("failed to load catalog file\n"); hfsplus_fill_super()
483 pr_err("failed to load attributes file\n"); hfsplus_fill_super()
492 pr_err("failed to load allocation file\n"); hfsplus_fill_super()
501 pr_err("failed to load root directory\n"); hfsplus_fill_super()
/linux-4.1.27/arch/m68k/ifpsp060/src/
H A Disp.S1041 mov.l EXC_A0(%a6),%a0 # load current value
1052 mov.l EXC_A1(%a6),%a0 # load current value
1063 mov.l EXC_A2(%a6),%a0 # load current value
1074 mov.l EXC_A3(%a6),%a0 # load current value
1085 mov.l EXC_A4(%a6),%a0 # load current value
1096 mov.l EXC_A5(%a6),%a0 # load current value
1107 mov.l EXC_A6(%a6),%a0 # load current value
1120 mov.l EXC_A7(%a6),%a0 # load current value
2581 # load temp registers with operands
2597 clr.l %d7 # load %d7 w/ zero value
2849 mov.w EXC_CC(%a6),%cc # load old ccodes
2880 mov.w EXC_CC(%a6),%cc # load old ccodes
3104 mov.l ADDR(%a6),%a0 # load <ea>
3147 lea _CASHI(%pc),%a1 # load end of CAS core code
3150 lea _CASLO(%pc),%a1 # load begin of CAS core code
3197 # (4) Use "plpaw" instruction to pre-load ATC with effective #
3204 # (6) Use "plpar" instruction to do a re-load of ATC entries for #
3260 movq.l &0x1,%d0 # load user data fc
3263 movq.l &0x5,%d0 # load supervisor data fc
3283 # load the SFC and DFC with the appropriate mode.
3288 # pre-load the operand ATC. no page faults should occur here because
3290 plpaw (%a2) # load atc for ADDR1
3291 plpaw (%a4) # load atc for ADDR1+3
3292 plpaw (%a3) # load atc for ADDR2
3293 plpaw (%a5) # load atc for ADDR2+3
3308 plpar (%a2) # load atc for ADDR1
3309 plpar (%a4) # load atc for ADDR1+3
3311 # load the BUSCR values.
3585 # load the SFC and DFC with the appropriate mode.
3590 # pre-load the operand ATC. no page faults should occur because
3592 plpaw (%a2) # load atc for ADDR1
3593 plpaw (%a4) # load atc for ADDR1+1
3594 plpaw (%a3) # load atc for ADDR2
3595 plpaw (%a5) # load atc for ADDR2+1
3610 plpar (%a2) # load atc for ADDR1
3611 plpar (%a4) # load atc for ADDR1+3
3613 # load the BUSCR values.
3817 # (4) Use "plpaw" instruction to pre-load ATC with effective #
3872 movq.l &0x1,%d0 # load user data fc
3875 movq.l &0x5,%d0 # load supervisor data fc
3894 # load the SFC and DFC with the appropriate mode.
3896 movc %d0,%sfc # load new sfc
3897 movc %d0,%dfc # load new dfc
3899 # pre-load the operand ATC. no page faults should occur here because
3901 plpaw (%a1) # load atc for ADDR
3902 plpaw (%a2) # load atc for ADDR+1
3908 # load the BUSCR values.
3913 # pre-load the instruction cache for the following algorithm.
4052 # load the SFC and DFC with the appropriate mode.
4054 movc %d0,%sfc # load new sfc
4055 movc %d0,%dfc # load new dfc
4057 # pre-load the operand ATC. no page faults should occur here because
4059 plpaw (%a1) # load atc for ADDR
4060 plpaw (%a2) # load atc for ADDR+3
4066 # load the BUSCR values.
4198 # load the SFC and DFC with the appropriate mode.
4200 movc %d0,%sfc # load new sfc
4201 movc %d0,%dfc # load new dfc
4203 # pre-load the operand ATC. no page faults should occur here because
4205 plpaw (%a1) # load atc for ADDR
4206 plpaw (%a2) # load atc for ADDR+3
4212 # load the BUSCR values.
4217 # pre-load the instruction cache for the following algorithm.
/linux-4.1.27/arch/arm/crypto/
H A Daes-ce-core.S156 vld1.8 {q8-q9}, [\rk] @ load first 2 round keys
157 vld1.8 {q14}, [ip] @ load last round key
285 vld1.8 {q6}, [r5] @ load ctr
345 vmov ip, \sreg @ load next word of ctr
380 ldrd r4, r5, [sp, #16] @ load args
382 vld1.8 {q0}, [r5] @ load iv
388 ldr r6, [sp, #24] @ load AES key 2
H A Dbsaes-armv7.pl927 vld1.8 {@XMM[7]}, [$inp]! @ load round 0 key
929 vld1.8 {@XMM[15]}, [$inp]! @ load round 1 key
962 vld1.8 {@XMM[15]}, [$inp]! @ load next round key
1009 vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
1059 vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
1152 vld1.8 {@XMM[15]}, [$ivp] @ load IV
1160 vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
1202 vld1.8 {@XMM[0]}, [$inp]! @ load input
1336 vld1.8 {@XMM[0]}, [$fp,:64] @ load result
1394 vld1.8 {@XMM[0]}, [$ctr] @ load counter
1396 vldmia $keysched, {@XMM[4]} @ load round0 key
1413 vld1.8 {@XMM[0]}, [$ctr] @ load counter
1415 vldmia r12, {@XMM[4]} @ load round0 key
1443 vldmia $keysched, {@XMM[9]} @ load round0 key
1459 vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ load input
1482 vldmia $fp, {@XMM[0]} @ load counter
1490 vld1.8 {@XMM[8]}, [$inp]! @ load input
1547 ldr r8, [ip, #12] @ load counter LSW
1548 vld1.8 {@XMM[1]}, [ip] @ load whole counter value
1563 vld1.8 {@XMM[0]}, [r4]! @ load input
1564 vld1.8 {@XMM[1]}, [sp,:64] @ load encrypted counter
1668 vldmia $magic, {$twmask} @ load XTS magic
1738 vldmia $magic, {$twmask} @ load XTS magic
2093 vldmia $magic, {$twmask} @ load XTS magic
2163 vldmia $magic, {$twmask} @ load XTS magic
/linux-4.1.27/drivers/net/wireless/cw1200/
H A Dfwio.c150 pr_err("Can't load firmware file %s.\n", fw_path); cw1200_load_firmware_cw1200()
156 pr_err("Can't allocate firmware load buffer.\n"); cw1200_load_firmware_cw1200()
475 pr_err("Can't handle CW1160/1260 firmware load yet.\n"); cw1200_load_firmware()
482 pr_err("Can't perform firmware load for hw type %d.\n", cw1200_load_firmware()
488 pr_err("Firmware load error.\n"); cw1200_load_firmware()
/linux-4.1.27/tools/perf/scripts/python/
H A Dsched-migration.py148 def load(self): member in class:RunqueueSnapshot
174 diff = new_rq.load() - old_rq.load()
274 raw += "Load = %d\n" % rq.load()
284 load_rate = rq.load() / float(slice.total_load)
