/linux-4.1.27/arch/sh/include/asm/ |
H A D | unaligned.h | 5 /* SH-4A can handle unaligned loads in a relatively neutered fashion. */
|
H A D | unaligned-sh4a.h | 5 * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
|
/linux-4.1.27/arch/avr32/include/asm/ |
H A D | unaligned.h | 10 * However, swapped word loads must be word-aligned so we can't 11 * optimize word loads in general.
|
/linux-4.1.27/arch/powerpc/include/asm/ |
H A D | barrier.h | 13 * loads and stores to non-cacheable memory (e.g. I/O devices). 15 * mb() prevents loads and stores being reordered across this point. 16 * rmb() prevents loads being reordered across this point. 18 * read_barrier_depends() prevents data-dependent loads being reordered 27 * doesn't order loads with respect to previous stores. Lwsync can be
|
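Note on the barrier.h entry above: the typical use of the wmb()/rmb() pair it documents is publish/consume ordering, where the writer orders its data stores before a flag store and the reader orders the flag load before the dependent loads. The sketch below illustrates the same ordering with portable C11 atomics and pthreads; it is an analogue for illustration, not the powerpc mb()/rmb()/wmb() implementation. Build with cc -pthread; it always prints payload = 42.

  /*
   * Illustrative analogue only (not kernel code): in C11, a release store
   * plays the role of wmb()+flag store, and an acquire load plays the role
   * of flag load+rmb(), so a consumer that observes ready == 1 is also
   * guaranteed to observe payload == 42.
   */
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  static int payload;           /* ordinary data published by the writer */
  static atomic_int ready;      /* flag that publishes the data          */

  static void *producer(void *arg)
  {
      payload = 42;                                            /* data store   */
      atomic_store_explicit(&ready, 1, memory_order_release);  /* "wmb"+store  */
      return NULL;
  }

  static void *consumer(void *arg)
  {
      while (!atomic_load_explicit(&ready, memory_order_acquire))
          ;                                                    /* load+"rmb"   */
      printf("payload = %d\n", payload);                       /* prints 42    */
      return NULL;
  }

  int main(void)
  {
      pthread_t p, c;

      pthread_create(&c, NULL, consumer, NULL);
      pthread_create(&p, NULL, producer, NULL);
      pthread_join(p, NULL);
      pthread_join(c, NULL);
      return 0;
  }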
H A D | io.h | 216 /* There are no asm instructions for 64 bits reverse loads and stores */ in_le64() 230 /* There are no asm instructions for 64 bits reverse loads and stores */ in_be64()
|
H A D | cpm1.h | 12 * On the MBX board, EPPC-Bug loads CPM microcode into the first 512
|
H A D | ppc_asm.h | 310 * LOAD_REG_ADDRBASE loads part of the address of label 'name' into
|
/linux-4.1.27/include/uapi/linux/ |
H A D | sysinfo.h | 9 __kernel_ulong_t loads[3]; /* 1, 5, and 15 minute load averages */ member in struct:sysinfo
|
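The sysinfo.h entry above is the UAPI definition read back through sysinfo(2): loads[] carries the 1, 5 and 15 minute load averages as fixed-point values scaled by 1 << SI_LOAD_SHIFT. A minimal userspace sketch, assuming the glibc sysinfo() wrapper:

  /*
   * Minimal sketch: read and rescale the load averages the kernel exports
   * in sysinfo.loads[]. SI_LOAD_SHIFT is 16 in include/uapi/linux/sysinfo.h;
   * it is redefined here only in case the libc headers do not expose it.
   */
  #include <stdio.h>
  #include <sys/sysinfo.h>

  #ifndef SI_LOAD_SHIFT
  #define SI_LOAD_SHIFT 16
  #endif

  int main(void)
  {
      struct sysinfo si;

      if (sysinfo(&si) != 0) {
          perror("sysinfo");
          return 1;
      }
      printf("load averages: %.2f %.2f %.2f\n",
             si.loads[0] / (double)(1 << SI_LOAD_SHIFT),
             si.loads[1] / (double)(1 << SI_LOAD_SHIFT),
             si.loads[2] / (double)(1 << SI_LOAD_SHIFT));
      return 0;
  }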
H A D | kexec.h | 4 /* kexec system call - It loads the new kernel to boot into.
|
/linux-4.1.27/samples/bpf/ |
H A D | bpf_load.h | 16 * . loads eBPF programs via BPF syscall
|
H A D | sock_example.c | 4 * - loads eBPF program:
|
/linux-4.1.27/drivers/net/appletalk/ |
H A D | cops_ffdrv.h | 5 * side driver and the routine that loads this data into the card are.
|
H A D | cops_ltdrv.h | 4 * side driver and the routine that loads this data into the card are.
|
/linux-4.1.27/arch/mips/math-emu/ |
H A D | me-debugfs.c | 54 FPU_STAT_CREATE(loads); debugfs_fpuemu()
|
H A D | cp1emu.c | 1054 MIPS_FPU_EMU_INC_STATS(loads); cop1Emulate() 1089 MIPS_FPU_EMU_INC_STATS(loads); cop1Emulate() 1469 MIPS_FPU_EMU_INC_STATS(loads); fpux_emu() 1566 MIPS_FPU_EMU_INC_STATS(loads); fpux_emu()
|
/linux-4.1.27/arch/blackfin/include/asm/ |
H A D | cache.h | 14 * Blackfin loads 32 bytes for cache
|
H A D | entry.h | 27 to the stack, loads the IRQ number, and jumps to common code. */
|
/linux-4.1.27/arch/hexagon/mm/ |
H A D | copy_from_user.S | 34 * copy from user: loads can fault 104 /* COPY FROM USER: only loads can fail */
|
/linux-4.1.27/arch/mips/include/asm/octeon/ |
H A D | octeon.h | 223 * loads/stores can use XKPHYS addresses with 226 /* R/W If set (and UX set), user-level loads/stores 230 * loads/stores can use XKPHYS addresses with 233 /* R/W If set (and UX set), user-level loads/stores 266 /* R/W If set, CVMSEG is available for loads/stores in 269 /* R/W If set, CVMSEG is available for loads/stores in 272 /* R/W If set, CVMSEG is available for loads/stores in
|
H A D | cvmx-address.h | 189 /* there should only be stores to IOBDMA space, no loads */
|
H A D | cvmx-pow.h | 242 * Address for new work request loads (did<2:0> == 0) 275 * Address for loads to get POW internal status 323 * Address for memory loads to get POW internal state 367 * Address for index/pointer loads 468 * Response to new work request loads 1142 * Response to NULL_RD request loads 1180 * - No other loads/stores have an effect on the pending switch bit
|
/linux-4.1.27/arch/tile/lib/ |
H A D | cacheflush.c | 65 * below, before any further loads, so there's no need to do it finv_buffer_remote() 112 * trap to issue loads directly to each hash-for-home tile for finv_buffer_remote() 142 * Fire all the loads we need. The MAF only has eight entries finv_buffer_remote() 143 * so we can have at most eight outstanding loads, so we finv_buffer_remote() 151 * Repeat, but with finv's instead of loads, to get rid of the finv_buffer_remote()
|
H A D | memcpy_64.c | 269 * Do all the loads before wh64. This memcpy()
|
H A D | memcpy_32.S | 253 * used by the loads initiated by the caller. 432 * avoids shunting the loads to the RTF.
|
/linux-4.1.27/arch/sh/mm/ |
H A D | extable_64.c | 26 * many loads and stores in them to make it at all practical to label
|
/linux-4.1.27/arch/sparc/include/asm/ |
H A D | switch_to_64.h | 17 * not preserve its value. Hairy, but it lets us remove 2 loads
|
H A D | estate.h | 15 * of the E-cache by the local processor for: 1) data loads 2) instruction
|
H A D | trap_block.h | 157 /* Clobbers TMP, loads local processor's IRQ work area into DEST. */ 162 /* Clobbers TMP, loads DEST with current thread info pointer. */ 195 /* Clobbers TMP, loads local processor's IRQ work area into DEST. */
|
H A D | spitfire.h | 211 * ASI_{D,I}TLB_DATA_ACCESS loads, doing the load twice fixes
|
H A D | io_64.h | 396 * using physically addressed loads and stores, so this does nothing.
|
H A D | tsb.h | 53 /* Some cpus support physical address quad loads. We want to use
|
H A D | ttable.h | 220 * now we use immediate ASI loads and stores instead. Kudos
|
/linux-4.1.27/arch/sparc/lib/ |
H A D | user_fixup.c | 15 * block loads and stores can be very complicated.
|
H A D | NGmemcpy.S | 137 * we can do the 16-byte loads offset by -8 bytes and the
|
H A D | checksum_32.S | 483 * we only bother with faults on loads... */
|
/linux-4.1.27/arch/tile/kernel/ |
H A D | vmlinux.lds.S | 7 /* Text loads starting from the supervisor interrupt vector address. */
|
H A D | single_step.c | 138 * FIXME: we could handle non-GPR loads by getting the real value rewrite_load_store_unaligned() 505 /* loads and iret */ single_step_once() 607 /* loads */ single_step_once()
|
/linux-4.1.27/arch/mips/include/asm/ |
H A D | mips-r2-to-r6-emul.h | 22 u64 loads; member in struct:mips_r2_emulator_stats
|
H A D | fpu_emulator.h | 37 unsigned long loads; member in struct:mips_fpu_emulator_stats
|
/linux-4.1.27/arch/sh/boot/romimage/ |
H A D | mmcif-sh7724.c | 27 * loads the romImage from an MMC card starting from block 512
|
/linux-4.1.27/arch/mips/include/asm/sn/ |
H A D | mapped_kernel.h | 17 * on IRIX to see where the sections go. The Origin loader loads
|
/linux-4.1.27/arch/avr32/lib/ |
H A D | memcpy.S | 12 * This implementation does word-aligned loads in the main loop,
|
/linux-4.1.27/arch/c6x/include/asm/ |
H A D | uaccess.h | 23 * C6X supports unaligned 32 and 64 bit loads and stores.
|
/linux-4.1.27/sound/soc/codecs/ |
H A D | sigmadsp-i2c.c | 71 * Allocates a SigmaDSP instance and loads the specified firmware file.
|
H A D | sigmadsp-regmap.c | 36 * Allocates a SigmaDSP instance and loads the specified firmware file.
|
H A D | sigmadsp.c | 119 /* safeload loads up to 20 bytes in an atomic operation */ sigmadsp_ctrl_write() 563 * Allocates a SigmaDSP instance and loads the specified firmware file.
|
/linux-4.1.27/arch/tile/include/asm/ |
H A D | barrier.h | 30 * wait until those loads have completed.
|
H A D | cacheflush.h | 144 * true, we will do a more expensive flush involving additional loads
|
/linux-4.1.27/arch/ia64/lib/ |
H A D | strlen.S | 24 // of compute zero index (czx), parallel comparisons, speculative loads and 33 // We use speculative loads and software pipelining to hide memory 39 // only normal loads. If we still get a fault then we generate a
|
H A D | strlen_user.S | 29 // loads and loop unroll using rotating registers. 37 // We use speculative loads and software pipelining to hide memory 43 // only normal loads. If we still get a fault then we return an
|
H A D | copy_user.S | 8 * faults on loads. When writing to user space we must catch 418 // - the pipeline: loads/stores are not in sync (pipeline) 425 // previous loads not yet store in transit. Such data must be 468 // main difficulty comes from the fact that loads/stores are 470 // to previous successful loads must be executed. 498 // size loads, e.g. failing ld4 means no ld1 nor ld2 executed 562 // of doing loads we fill the pipeline with zeroes
|
H A D | copy_page_mck.S | 14 * - use regular loads and stores to prefetch data to avoid consuming M-slot just for
|
H A D | memcpy_mck.S | 539 * Take copy_from_user as an example, in the code there are multiple loads 540 * in a bundle and those multiple loads could span over two pages, the
|
/linux-4.1.27/arch/alpha/lib/ |
H A D | ev6-copy_user.S | 75 EXI( ldbu $1,0($7) ) # .. .. .. L : Keep loads separate from stores 127 EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad 214 EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad
|
H A D | ev6-csum_ipv6_magic.S | 51 * The cost is 16 instructions (~8 cycles), including two extra loads which
|
/linux-4.1.27/include/linux/regulator/ |
H A D | consumer.h | 28 * idling. Regulator r has > 90% efficiency in NORMAL mode at loads > 100mA 30 * efficiency in IDLE mode at loads < 10mA. Thus regulator r will operate 31 * in normal mode for loads > 10mA and in IDLE mode for load <= 10mA. 66 * loads. Can be used for devices that have a low power 72 * light loads. Can be used by devices when they are
|
/linux-4.1.27/drivers/staging/rtl8712/ |
H A D | rtl8712_recv.h | 33 * loads, even 8 receive buffers might not be enough; cutting it to 4 seemed
|
/linux-4.1.27/include/linux/ |
H A D | uaccess.h | 30 * make sure to issue those last loads/stores before enabling pagefault_enable()
|
H A D | compiler.h | 433 "Need native word sized stores/loads for atomicity.")
|
/linux-4.1.27/arch/powerpc/xmon/ |
H A D | spu.h | 102 /* For branching, immediate loads, hbr and lqa/stqa. */
|
/linux-4.1.27/arch/arm64/ |
H A D | Makefile | 21 KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
|
/linux-4.1.27/arch/ia64/include/asm/ |
H A D | barrier.h | 61 * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
|
/linux-4.1.27/arch/arm/mach-rpc/ |
H A D | riscpc.c | 97 * RiscPC can't handle half-word loads and stores rpc_map_io()
|
/linux-4.1.27/arch/arc/include/asm/ |
H A D | thread_info.h | 11 * the generic version (get_free_page) would be loads better.
|
/linux-4.1.27/include/linux/i2c/ |
H A D | adp8860.h | 146 * loads that are not connected through the ADP8863 diode drivers.
|
/linux-4.1.27/arch/x86/kernel/ |
H A D | machine_kexec_32.c | 33 /* ia32 supports unaligned loads & stores */ set_idt() 45 /* ia32 supports unaligned loads & stores */ set_gdt()
|
H A D | machine_kexec_64.c | 147 /* x86-64 supports unaligned loads & stores */ set_idt() 162 /* x86-64 supports unaligned loads & stores */ set_gdt()
|
H A D | process_64.c | 294 * Load TLS before restoring any segments so that segment loads __switch_to() 311 * nonzero) loads the full descriptor from the GDT or LDT. The __switch_to()
|
H A D | process.c | 433 * So for default C1, which is used during boot until cpuidle loads,
|
/linux-4.1.27/arch/x86/vdso/ |
H A D | vclock_gettime.c | 184 * with respect to loads. The various CPU manuals are unclear vread_tsc() 185 * as to whether rdtsc can be reordered with later loads, vread_tsc()
|
/linux-4.1.27/arch/powerpc/kernel/ |
H A D | align.c | 222 * bottom 4 bytes of each register, and the loads clear the 345 * Emulate floating-point pair loads and stores. 461 * Emulate SPE loads and stores. 718 /* For 8 byte loads, zero the low 8 bytes */ emulate_vsx() 821 /* Byteswap little endian loads and stores */ fix_alignment() 913 /* Special case for 16-byte FP loads and stores */ fix_alignment() 918 /* Special case for 16-byte loads and stores */ fix_alignment() 989 case LD+SE: /* sign extending integer loads */ fix_alignment()
|
H A D | fpu.S | 242 * single-precision loads and stores.
|
H A D | uprobes.c | 121 * On powerpc, except for loads and stores, most instructions arch_uprobe_post_xol()
|
H A D | signal_32.c | 887 /* Now, recheckpoint. This loads up all of the checkpointed (older) restore_tm_user_regs() 894 /* This loads the checkpointed FP/VEC state, if used */ restore_tm_user_regs() 897 /* This loads the speculative FP/VEC state, if used */ restore_tm_user_regs()
|
H A D | head_8xx.S | 68 * code there loads up some registers before calling us: 292 * TLB. The task switch loads the M_TW register with the pointer to the first
|
H A D | signal_64.c | 535 /* This loads the checkpointed FP/VEC state, if used */ restore_tm_sigcontexts() 538 /* This loads the speculative FP/VEC state, if used */ restore_tm_sigcontexts()
|
H A D | legacy_serial.c | 527 * finally loads.
|
/linux-4.1.27/arch/powerpc/lib/ |
H A D | memcpy_64.S | 108 ld r9,0(r4) # 3+2n loads, 2+2n stores 120 0: ld r0,0(r4) # 4+2n loads, 3+2n stores
|
H A D | copyuser_power7.S | 159 /* Now do cacheline (128B) sized loads and stores. */ 431 * Now do cacheline sized loads and stores. By this stage the 616 * Now do cacheline sized loads and stores. By this stage the
|
H A D | memcpy_power7.S | 92 /* Now do cacheline (128B) sized loads and stores. */ 364 * Now do cacheline sized loads and stores. By this stage the 550 * Now do cacheline sized loads and stores. By this stage the
|
H A D | copyuser_64.S | 127 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ 138 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
|
H A D | ldstfp.S | 2 * Floating-point, VMX/Altivec and VSX loads and stores
|
/linux-4.1.27/tools/testing/selftests/powerpc/copyloops/ |
H A D | memcpy_64.S | 108 ld r9,0(r4) # 3+2n loads, 2+2n stores 120 0: ld r0,0(r4) # 4+2n loads, 3+2n stores
|
H A D | copyuser_power7.S | 159 /* Now do cacheline (128B) sized loads and stores. */ 431 * Now do cacheline sized loads and stores. By this stage the 616 * Now do cacheline sized loads and stores. By this stage the
|
H A D | memcpy_power7.S | 92 /* Now do cacheline (128B) sized loads and stores. */ 364 * Now do cacheline sized loads and stores. By this stage the 550 * Now do cacheline sized loads and stores. By this stage the
|
H A D | copyuser_64.S | 127 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ 138 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
|
/linux-4.1.27/arch/powerpc/net/ |
H A D | bpf_jit_comp.c | 38 /* If we call any helpers (for loads), save LR */ bpf_jit_build_prologue() 305 /*** Constant loads/M[] access ***/ bpf_jit_build_body() 336 /*** Ancillary info loads ***/ bpf_jit_build_body() 407 /*** Absolute loads from packet header/data ***/ bpf_jit_build_body() 430 /*** Indirect loads from packet header/data ***/ bpf_jit_build_body()
|
/linux-4.1.27/kernel/sched/ |
H A D | proc.c | 66 * @loads: pointer to dest load array 72 void get_avenrun(unsigned long *loads, unsigned long offset, int shift) get_avenrun() argument 74 loads[0] = (avenrun[0] + offset) << shift; get_avenrun() 75 loads[1] = (avenrun[1] + offset) << shift; get_avenrun() 76 loads[2] = (avenrun[2] + offset) << shift; get_avenrun()
|
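The proc.c entry above shows get_avenrun() handing out the scheduler's avenrun[] counters, which are fixed-point values scaled by FIXED_1 (1 << FSHIFT, with FSHIFT == 11); consumers such as /proc/loadavg and the kdb summary code listed further down under kdb_main.c turn them into the familiar "1.50"-style numbers with LOAD_INT()/LOAD_FRAC(). A small sketch of that conversion follows; the sample raw value is invented for illustration.

  /*
   * Sketch of the fixed-point arithmetic only; the macro definitions mirror
   * the kernel's FSHIFT/FIXED_1/LOAD_INT/LOAD_FRAC.
   */
  #include <stdio.h>

  #define FSHIFT       11                       /* bits of fractional precision */
  #define FIXED_1      (1 << FSHIFT)            /* 1.0 in fixed point           */
  #define LOAD_INT(x)  ((x) >> FSHIFT)
  #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

  int main(void)
  {
      unsigned long avenrun0 = 3 * FIXED_1 / 2; /* raw value meaning 1.50 */

      printf("1 minute load: %lu.%02lu\n",
             LOAD_INT(avenrun0), LOAD_FRAC(avenrun0));
      return 0;
  }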
H A D | completion.c | 309 * the loads of ->done and ->wait.lock such that we cannot observe completion_done()
|
/linux-4.1.27/drivers/net/wireless/iwlegacy/ |
H A D | prph.h | 109 * internally loads the short bootstrap program from the special SRAM into the 113 * This bootstrap program loads (via PCI busmaster DMA) instructions and data 116 * four BSM_DRAM_* registers. Once the bootstrap program loads the new program, 124 * After signal from host, it then loads and starts the runtime program. 151 * 6) Enable future boot loads whenever NIC's power management triggers it: 157 * The bootstrap uCode (already in instruction SRAM) loads initialization 182 * When powering back up, the BSM loads the bootstrap program. Bootstrap looks
|
H A D | 4965.c | 316 * up after power-saving sleeps (or during initial uCode load), the BSM loads 319 * The bootstrap program loads (via DMA) instructions and data for a new 337 * When powering back up, the BSM loads the bootstrap program. This reloads 412 /* Enable future boot loads whenever power management unit triggers it il4965_load_bsm()
|
H A D | 3945.c | 2546 * up after power-saving sleeps (or during initial uCode load), the BSM loads 2549 * The bootstrap program loads (via DMA) instructions and data for a new 2567 * When powering back up, the BSM loads the bootstrap program. This reloads 2639 /* Enable future boot loads whenever power management unit triggers it il3945_load_bsm()
|
/linux-4.1.27/arch/mips/cavium-octeon/ |
H A D | setup.c | 523 /* R/W If set (and SX set), supervisor-level loads/stores can octeon_user_io_init() 527 /* R/W If set (and UX set), user-level loads/stores can use octeon_user_io_init() 531 /* R/W If set (and SX set), supervisor-level loads/stores can octeon_user_io_init() 535 /* R/W If set (and UX set), user-level loads/stores can use octeon_user_io_init() 576 /* R/W If set, CVMSEG is available for loads/stores in octeon_user_io_init() 583 /* R/W If set, CVMSEG is available for loads/stores in octeon_user_io_init() 586 /* R/W If set, CVMSEG is available for loads/stores in user octeon_user_io_init()
|
H A D | octeon-memcpy.S | 55 * The exception handler for loads requires that: 64 * (3) is met by not doing loads between a pair of increments of dst and src
|
/linux-4.1.27/arch/mips/kernel/ |
H A D | mips-r2-to-r6-emul.c | 1267 MIPS_R2_STATS(loads); mipsr2_decoder() 1341 MIPS_R2_STATS(loads); mipsr2_decoder() 1601 MIPS_R2_STATS(loads); mipsr2_decoder() 1720 MIPS_R2_STATS(loads); mipsr2_decoder() 2259 seq_printf(s, "loads\t\t%ld\t%ld\n", mipsr2_stats_show() 2260 (unsigned long)__this_cpu_read(mipsr2emustats.loads), mipsr2_stats_show() 2261 (unsigned long)__this_cpu_read(mipsr2bdemustats.loads)); mipsr2_stats_show() 2317 __this_cpu_write((mipsr2emustats).loads, 0); mipsr2_stats_clear_show() 2318 __this_cpu_write((mipsr2bdemustats).loads, 0); mipsr2_stats_clear_show()
|
H A D | pm-cps.c | 294 /* Base address for loads */ cps_gen_flush_fsb() 300 /* Perform some loads to fill the FSB */ cps_gen_flush_fsb()
|
/linux-4.1.27/drivers/iio/humidity/ |
H A D | si7020.c | 122 /* Reset device, loads default settings. */ si7020_probe()
|
/linux-4.1.27/arch/x86/include/asm/ |
H A D | special_insns.h | 18 * all loads stores around it, which can hurt performance. Solution is to
|
H A D | bitops.h | 152 * older loads. Same principle as spin_unlock.
|
/linux-4.1.27/include/linux/platform_data/ |
H A D | emif_plat.h | 77 * (typically at lower loads indicated by lower OPPs)
|
/linux-4.1.27/arch/mn10300/boot/tools/ |
H A D | build.c | 9 * - bootsect: exactly 512 bytes of 8086 machine code, loads the rest
|
/linux-4.1.27/arch/arm64/include/asm/ |
H A D | spinlock.h | 167 * It exclusively loads the lock value, increments it and stores the new value
|
/linux-4.1.27/arch/ia64/hp/common/ |
H A D | aml_nfw.c | 21 * handler loads up the arguments, makes the firmware call, and returns the
|
/linux-4.1.27/arch/arm/mach-tegra/ |
H A D | sleep.h | 85 /* loads a 32-bit value into a register without a data access */
|
/linux-4.1.27/arch/arm/kvm/ |
H A D | mmio.c | 89 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
|
/linux-4.1.27/arch/arc/kernel/ |
H A D | process.c | 36 * and it will be loads better than copy-to-user, which is a definite
|
/linux-4.1.27/arch/arc/mm/ |
H A D | fault.c | 205 * to some fixup code that loads an appropriate error do_page_fault()
|
H A D | tlbex.S | 131 ; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
|
/linux-4.1.27/lib/ |
H A D | sha1.c | 24 * try to do the silly "optimize away loads" part because it won't
|
/linux-4.1.27/drivers/pci/pcie/aer/ |
H A D | aerdrv.c | 120 * Invoked when PCIe bus loads AER service driver. 299 * Invoked when PCI Express bus loads AER service driver.
|
/linux-4.1.27/arch/cris/boot/rescue/ |
H A D | head_v10.S | 78 ;; The length is enough for downloading code that loads the rest 80 ;; It is the same length as the on-chip ROM loads, so the same
|
/linux-4.1.27/arch/ia64/kernel/ |
H A D | unaligned.c | 674 * Given the way we handle unaligned speculative loads, we should emulate_load_updates() 779 * check for updates on any kind of loads emulate_load_int() 785 * handling of various loads (based on EAS2.4): emulate_load_int() 1149 * See comments in ldX for descriptions on how the various loads are handled. emulate_load_float() 1186 * check for updates on any loads emulate_load_float() 1192 * invalidate ALAT entry in case of advanced floating point loads emulate_load_float() 1412 * speculative loads: ia64_handle_unaligned()
|
H A D | ivt.S | 206 * cannot possibly affect the following loads: 275 * cannot possibly affect the following loads: 319 * cannot possibly affect the following loads: 562 * cannot possibly affect the following loads: 628 * cannot possibly affect the following loads: 682 * cannot possibly affect the following loads: 799 // If any of the above loads miss in L1D, we'll stall here until
|
H A D | palinfo.c | 97 "eager loads", 98 "eager loads and stores"
|
/linux-4.1.27/arch/powerpc/mm/ |
H A D | tlb_low_64e.S | 260 * Indirect entries in TLB1, hardware loads resulting direct entries 541 /* XXX replace the RMW cycles with immediate loads + writes */ 589 /* XXX replace the RMW cycles with immediate loads + writes */ 767 /* XXX replace the RMW cycles with immediate loads + writes */ 948 /* XXX replace the RMW cycles with immediate loads + writes */ 992 /* XXX replace the RMW cycles with immediate loads + writes */
|
/linux-4.1.27/arch/xtensa/variants/dc233c/include/variant/ |
H A D | core.h | 120 #define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */ 122 #define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
|
/linux-4.1.27/arch/sparc/mm/ |
H A D | fault_64.c | 370 /* All loads, stores and atomics have bits 30 and 31 both set do_sparc64_fault() 390 /* Non-faulting loads shouldn't expand stack. */ do_sparc64_fault()
|
/linux-4.1.27/drivers/w1/masters/ |
H A D | omap_hdq.c | 171 /* ISR loads it with new INT_STATUS */ hdq_write_byte() 310 /* ISR loads it with new INT_STATUS */ omap_hdq_break()
|
/linux-4.1.27/arch/s390/kernel/ |
H A D | crash_dump.c | 548 * Initialize ELF loads (new kernel) 637 /* Init loads */ elfcorehdr_alloc()
|
/linux-4.1.27/kernel/ |
H A D | sys.c | 2281 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT); do_sysinfo() 2347 u32 loads[3]; member in struct:compat_sysinfo 2391 __put_user(s.loads[0], &info->loads[0]) || COMPAT_SYSCALL_DEFINE1() 2392 __put_user(s.loads[1], &info->loads[1]) || COMPAT_SYSCALL_DEFINE1() 2393 __put_user(s.loads[2], &info->loads[2]) || COMPAT_SYSCALL_DEFINE1()
|
H A D | jump_label.c | 265 * loads patch these with arch_get_jump_label_nop(), which is specified by
|
H A D | seccomp.c | 94 * redirects all filter code that loads struct sk_buff data 96 * enforces length and alignment checking of those loads.
|
/linux-4.1.27/drivers/media/pci/cx18/ |
H A D | cx18-gpio.c | 241 * 5. Zilog comes out of reset, loads reset vector address and resetctrl_reset()
|
/linux-4.1.27/drivers/media/rc/ |
H A D | rc-core-priv.h | 167 * loads the compiled decoders for their usage with IR raw events
|
/linux-4.1.27/drivers/dma/ |
H A D | fsldma.h | 208 /* There are no asm instructions for 64 bits reverse loads and stores */ in_le64()
|
/linux-4.1.27/arch/mips/include/asm/lasat/ |
H A D | lasat.h | 20 * Configuration for LASAT boards, loads the appropriate include files.
|
/linux-4.1.27/drivers/hv/ |
H A D | hv_snapshot.c | 289 * When this driver loads, the user level daemon that hv_vss_init()
|
H A D | hv_fcopy.c | 426 * When this driver loads, the user level daemon that hv_fcopy_init()
|
H A D | hv_kvp.c | 707 * When this driver loads, the user level daemon that hv_kvp_init()
|
/linux-4.1.27/include/linux/uwb/ |
H A D | umc.h | 21 * loads when WHCI devices are detected. These WHCI devices expose
|
/linux-4.1.27/arch/sh/drivers/pci/ |
H A D | pci.c | 276 * I/O space can be accessed via normal processor loads and stores on pci_mmap_page_range()
|
/linux-4.1.27/arch/mips/pci/ |
H A D | pci.c | 331 * I/O space can be accessed via normal processor loads and stores on pci_mmap_page_range()
|
/linux-4.1.27/arch/parisc/include/asm/ |
H A D | checksum.h | 135 ** We can execute two loads and two adds per cycle on PA 8000. csum_ipv6_magic()
|
/linux-4.1.27/arch/parisc/kernel/ |
H A D | pci.c | 229 * I/O space can be accessed via normal processor loads and stores on pci_mmap_page_range()
|
H A D | pacache.S | 435 /* PA8x00 CPUs can consume 2 loads or 1 store per cycle. 633 /* PA8x00 CPUs can consume 2 loads or 1 store per cycle. 688 ldd 0(%r29), %r19 /* start next loads */
|
H A D | unaligned.c | 490 if (regs->iir&0x1000) /* short loads */ handle_unaligned()
|
/linux-4.1.27/arch/cris/boot/tools/ |
H A D | build.c | 10 * - bootsect: exactly 512 bytes of 8086 machine code, loads the rest
|
/linux-4.1.27/arch/m68k/68360/ |
H A D | commproc.c | 17 * MBX, the EPPC software loads additional microcode into the
|
/linux-4.1.27/arch/arm/include/asm/ |
H A D | cmpxchg.h | 12 * since we use normal loads/stores as well, this is really bad.
|
/linux-4.1.27/tools/perf/ |
H A D | builtin-mem.c | 44 rec_argv[i++] = "cpu/mem-loads/pp"; __cmd_record()
|
/linux-4.1.27/tools/usb/ |
H A D | hcd-tests.sh | 28 # - add other concurrent system loads
|
/linux-4.1.27/kernel/debug/kdb/ |
H A D | kdb_main.c | 2526 val->loads[0] = avenrun[0]; kdb_sysinfo() 2527 val->loads[1] = avenrun[1]; kdb_sysinfo() 2528 val->loads[2] = avenrun[2]; kdb_sysinfo() 2577 LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]), kdb_summary() 2578 LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]), kdb_summary() 2579 LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2])); kdb_summary()
|
/linux-4.1.27/drivers/net/bonding/ |
H A D | bond_sysfs.c | 748 /* Permit multiple loads of the module by ignoring failures to bond_create_sysfs() 750 * created by second or subsequent loads of the module will bond_create_sysfs()
|
/linux-4.1.27/arch/sh/kernel/ |
H A D | traps_64.c | 498 case (0x40>>2): /* indexed loads */ misaligned_fixup() 544 case (0x1c>>2): /* floating indexed loads */ misaligned_fixup()
|
/linux-4.1.27/arch/x86/ia32/ |
H A D | ia32entry.S | 91 * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs. 318 * then loads new ss, cs, and rip from previously programmed MSRs.
|
/linux-4.1.27/arch/mips/lib/ |
H A D | memcpy.S | 71 * The exception handler for loads requires that: 80 * (3) is met by not doing loads between a pair of increments of dst and src
|
H A D | csum_partial.S | 324 * The exception handler for loads requires that: 334 * (3) is met by not doing loads between a pair of increments of dst and src
|
/linux-4.1.27/drivers/lguest/ |
H A D | segments.c | 89 * now, the CPU will try to set it when the Guest first loads fixup_gdt_table()
|
/linux-4.1.27/drivers/net/ethernet/8390/ |
H A D | 8390.h | 138 * Only generate indirect loads given a machine that needs them.
|
/linux-4.1.27/drivers/staging/i2o/ |
H A D | pci.c | 361 /* Cards that fall apart if you hit them with large I/O loads... */ i2o_pci_probe()
|
/linux-4.1.27/drivers/remoteproc/ |
H A D | remoteproc_elf_loader.c | 126 * This function loads the firmware segments to memory, where the remote
|
/linux-4.1.27/arch/x86/pci/ |
H A D | i386.c | 419 /* I/O space cannot be accessed via normal processor loads and pci_mmap_page_range()
|
/linux-4.1.27/arch/xtensa/variants/fsf/include/variant/ |
H A D | core.h | 82 #define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
|
/linux-4.1.27/arch/sparc/kernel/ |
H A D | trampoline_64.S | 333 * prom_world() loads from here to restore the %asi
|
H A D | pci_sabre.c | 109 #define SABRE_PCICTRL_MRLEN 0x0000001000000000UL /* Use MemoryReadLine for block loads/stores */
|
/linux-4.1.27/drivers/gpu/drm/msm/adreno/ |
H A D | adreno_gpu.c | 160 * pending for indirect loads after the timestamp has adreno_submit()
|
/linux-4.1.27/drivers/char/ |
H A D | bsr.c | 49 It may be read using 1, 2, 4, or 8 byte loads which must be aligned since
|
/linux-4.1.27/drivers/char/hw_random/ |
H A D | omap-rng.c | 155 * during "rngtest" loads, that these delays don't omap_rng_data_present()
|
/linux-4.1.27/drivers/video/fbdev/aty/ |
H A D | mach64_accel.c | 98 /* enable all registers to be loaded for context loads */ aty_init_engine()
|
/linux-4.1.27/drivers/watchdog/ |
H A D | bfin_wdt.c | 85 * Starts the on-chip watchdog. Automatically loads WDOG_CNT
|
/linux-4.1.27/arch/powerpc/platforms/powernv/ |
H A D | subcore.c | 159 /* Order the wait loop vs any subsequent loads/stores. */ wait_for_sync_step()
|
/linux-4.1.27/arch/mn10300/kernel/ |
H A D | kgdb.c | 193 /* SETLB - loads the next four bytes into the LIR register kgdb_arch_do_singlestep()
|
H A D | kprobes.c | 258 /* SETLB - loads the next four bytes into the LIR reg */ find_nextpc()
|
/linux-4.1.27/arch/openrisc/mm/ |
H A D | fault.c | 231 * to some fixup code that loads an appropriate error do_page_fault()
|
/linux-4.1.27/arch/arm/probes/kprobes/ |
H A D | test-core.h | 426 * relative loads with out of range offsets.
|
/linux-4.1.27/arch/cris/mm/ |
H A D | fault.c | 253 * to some fixup code that loads an appropriate error do_page_fault()
|
/linux-4.1.27/arch/hexagon/kernel/ |
H A D | traps.c | 257 * Misaligned loads and stores, on the other hand, can be
|
/linux-4.1.27/arch/mips/cavium-octeon/executive/ |
H A D | cvmx-l2c.c | 299 * Invalidate L1 cache to make sure all loads result in data fault_in() 655 * mode all data loads from L2 return special debug data, not __read_l2_tag() 704 * hardware loads the virtual tag for the L2 cache cvmx_l2c_get_tag()
|
/linux-4.1.27/drivers/misc/mic/host/ |
H A D | mic_x100.c | 332 /* Ensure that the reset is ordered w.r.t. previous loads and stores */ mic_x100_hw_reset()
|
/linux-4.1.27/drivers/spi/ |
H A D | spi-omap-uwire.c | 103 * that avoids having to issue two loads for each bit of static data.
|
/linux-4.1.27/drivers/net/wireless/ti/wl12xx/ |
H A D | reg.h | 290 loads a single byte of data into the EE_DATA
|
/linux-4.1.27/arch/xtensa/variants/dc232b/include/variant/ |
H A D | core.h | 83 #define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
|
/linux-4.1.27/arch/um/os-Linux/skas/ |
H A D | process.c | 336 * This can legitimately fail if the process loads a userspace()
|
/linux-4.1.27/fs/ubifs/ |
H A D | tnc_misc.c | 402 * This function loads znode pointed to by @zbr into the TNC cache and
|
/linux-4.1.27/drivers/video/fbdev/i810/ |
H A D | i810_main.c | 223 * i810_load_pll - loads values for the hardware PLL clock 360 * i810_load_pitch - loads the characters per line of the display 392 * i810_load_color - loads the color depth of the display 414 * i810_load_regs - loads all registers for the mode
|
/linux-4.1.27/arch/powerpc/platforms/85xx/ |
H A D | p1022_ds.c | 485 * driver loads. p1022_ds_setup_arch()
|
/linux-4.1.27/arch/hexagon/lib/ |
H A D | memcpy.S | 41 * DJH 4/20/10 Version 1.4 fixed Ldword_loop_epilog loop to prevent loads
|
/linux-4.1.27/arch/m68k/fpsp040/ |
H A D | x_operr.S | 222 | Store_max loads the max pos or negative for the size, sets
|
H A D | kernel_ex.S | 237 | For disabled overflow call 'ovf_r_k'. This routine loads the
|
/linux-4.1.27/arch/alpha/boot/ |
H A D | bootpz.c | 193 BOOT_ADDR Virtual address at which the consoles loads
|
/linux-4.1.27/net/core/ |
H A D | dev_ioctl.c | 355 * privileges this function loads the module. If module loading is not
|
H A D | datagram.c | 184 * * significantly datagram socket latencies at high loads,
|
/linux-4.1.27/mm/ |
H A D | frontswap.c | 447 debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads); init_frontswap()
|
H A D | swap_state.c | 399 * random loads, swapping to hard disk or to SSD: please don't ask swapin_nr_pages()
|
/linux-4.1.27/tools/testing/selftests/exec/ |
H A D | execveat.c | 333 /* Shell loads via deleted subdir OK because name starts with .. */ run_tests()
|
/linux-4.1.27/drivers/ata/ |
H A D | pata_it821x.c | 406 * perform our own device selection timing loads before the 443 * perform our own device selection timing loads before the
|
/linux-4.1.27/drivers/misc/sgi-xp/ |
H A D | xpc_sn2.c | 656 * on subsequent loads of XPC. This amo page is never freed, and its xpc_setup_rsvd_page_sn2() 1987 smp_rmb(); /* guarantee that .get loads before .put */ xpc_get_deliverable_payload_sn2() 2108 smp_rmb(); /* guarantee that .put loads before .get */ xpc_allocate_msg_sn2()
|
/linux-4.1.27/drivers/idle/ |
H A D | intel_idle.c | 22 * intel_idle is a cpuidle driver that loads on specific Intel processors
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/ |
H A D | iwl-fh.h | 88 * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
|
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/ |
H A D | ptlrpcd.c | 456 * because of some CPU core(s) busy with other loads.
|
/linux-4.1.27/drivers/staging/octeon/ |
H A D | ethernet-tx.c | 323 * shown a 25% increase in performance under some loads. cvm_oct_xmit()
|
/linux-4.1.27/drivers/net/wireless/ti/wl1251/ |
H A D | reg.h | 367 loads a single byte of data into the EE_DATA
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/gr/ |
H A D | ctxnv40.c | 43 * - In the section of code that loads the per-vs state, NVIDIA check
|
/linux-4.1.27/drivers/video/fbdev/ |
H A D | cg6.c | 696 * back to back store/loads on the mode register, so copy it cg6_chip_init()
|
/linux-4.1.27/arch/powerpc/sysdev/ |
H A D | cpm1.c | 15 * MBX, the EPPC software loads additional microcode into the
|
/linux-4.1.27/arch/s390/mm/ |
H A D | extmem.c | 495 * this function loads a DCSS segment
|
/linux-4.1.27/arch/mips/include/asm/pci/ |
H A D | bridge.h | 50 * using 32-bit loads and stores.
|
/linux-4.1.27/arch/parisc/lib/ |
H A D | memcpy.c | 37 * All the loads and stores are done via explicit asm() code in order to use
|
/linux-4.1.27/arch/ia64/pci/ |
H A D | pci.c | 597 * I/O space cannot be accessed via normal processor loads and pci_mmap_page_range()
|
/linux-4.1.27/arch/m68k/mac/ |
H A D | via.c | 99 * This can be fatal since it can't be handled until the right driver loads
|