/linux-4.4.14/arch/h8300/mm/ |
H A D | Makefile | 2 # Makefile for the linux h8300-specific parts of the memory manager. 5 obj-y := init.o fault.o memory.o
|
H A D | memory.c | 2 * linux/arch/h8300/mm/memory.c 8 * linux/arch/m68knommu/mm/memory.c 15 * linux/arch/m68k/mm/memory.c
|
/linux-4.4.14/tools/testing/selftests/memory-hotplug/ |
H A D | mem-on-off-test.sh | 21 if ! ls $SYSFS/devices/system/memory/memory* > /dev/null 2>&1; then 22 echo $msg memory hotplug is not supported >&2 28 # list all hot-pluggable memory 34 for memory in $SYSFS/devices/system/memory/memory*; do 35 if grep -q 1 $memory/removable && 36 grep -q $state $memory/state; then 37 echo ${memory##/*/memory} 54 grep -q online $SYSFS/devices/system/memory/memory$1/state 59 grep -q offline $SYSFS/devices/system/memory/memory$1/state 64 echo online > $SYSFS/devices/system/memory/memory$1/state 69 echo offline > $SYSFS/devices/system/memory/memory$1/state 74 local memory=$1 76 if ! online_memory $memory; then 77 echo $FUNCNAME $memory: unexpected fail >&2 78 elif ! memory_is_online $memory; then 79 echo $FUNCNAME $memory: unexpected offline >&2 85 local memory=$1 87 if online_memory $memory 2> /dev/null; then 88 echo $FUNCNAME $memory: unexpected success >&2 89 elif ! memory_is_offline $memory; then 90 echo $FUNCNAME $memory: unexpected online >&2 96 local memory=$1 98 if ! offline_memory $memory; then 99 echo $FUNCNAME $memory: unexpected fail >&2 100 elif ! memory_is_offline $memory; then 101 echo $FUNCNAME $memory: unexpected offline >&2 107 local memory=$1 109 if offline_memory $memory 2> /dev/null; then 110 echo $FUNCNAME $memory: unexpected success >&2 111 elif ! memory_is_online $memory; then 112 echo $FUNCNAME $memory: unexpected offline >&2 126 echo "Usage $0 [ -e errno ] [ -p notifier-priority ] [ -r percent-of-memory-to-offline ]" 145 echo "Test scope: $ratio% hotplug memory" 146 echo -e "\t online all hotplug memory in offline state" 147 echo -e "\t offline $ratio% hotplug memory in online state" 148 echo -e "\t online all hotplug memory in offline state" 151 # Online all hot-pluggable memory 153 for memory in `hotplaggable_offline_memory`; do 154 echo offline-online $memory 155 online_memory_expect_success $memory 159 # Offline $ratio percent of hot-pluggable memory 161 for memory in `hotpluggable_online_memory`; do 163 echo online-offline $memory 164 offline_memory_expect_success $memory 169 # Online all hot-pluggable memory again 171 for memory in `hotplaggable_offline_memory`; do 172 echo offline-online $memory 173 online_memory_expect_success $memory 177 # Test with memory notifier error injection 181 NOTIFIER_ERR_INJECT_DIR=$DEBUGFS/notifier-error-inject/memory 187 /sbin/modprobe -q -r memory-notifier-error-inject 188 /sbin/modprobe -q memory-notifier-error-inject priority=$priority 196 echo $msg memory-notifier-error-inject module is not available >&2 204 # Offline $ratio percent of hot-pluggable memory 207 for memory in `hotpluggable_online_memory`; do 209 offline_memory_expect_success $memory 214 # Test memory hot-add error handling (offline => online) 217 for memory in `hotplaggable_offline_memory`; do 218 online_memory_expect_fail $memory 222 # Online all hot-pluggable memory 225 for memory in `hotplaggable_offline_memory`; do 226 online_memory_expect_success $memory 230 # Test memory hot-remove error handling (online => offline) 233 for memory in `hotpluggable_online_memory`; do 234 offline_memory_expect_fail $memory 238 /sbin/modprobe -q -r memory-notifier-error-inject
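The script above exercises memory hotplug purely through sysfs: each hot-pluggable block shows up as /sys/devices/system/memory/memoryN, and writing "online" or "offline" to its state file triggers the transition. A minimal stand-alone C sketch of the same operation, assuming an arbitrary example block (memory8) and root privileges:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Write a state string ("online" / "offline") to one memory block. */
    static int set_state(const char *path, const char *state)
    {
            int fd = open(path, O_WRONLY);
            ssize_t n;

            if (fd < 0)
                    return -1;
            n = write(fd, state, strlen(state));
            close(fd);
            return n < 0 ? -1 : 0;
    }

    int main(void)
    {
            /* Example block only; real systems enumerate the memoryN dirs. */
            const char *state = "/sys/devices/system/memory/memory8/state";

            if (set_state(state, "offline"))
                    perror("offline");
            else if (set_state(state, "online"))
                    perror("online");
            return 0;
    }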
|
H A D | Makefile | 6 override RUN_TESTS := ./mem-on-off-test.sh -r 2 || echo "selftests: memory-hotplug [FAIL]" 10 @/bin/bash ./mem-on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
|
/linux-4.4.14/tools/arch/alpha/include/asm/ |
H A D | barrier.h | 4 #define mb() __asm__ __volatile__("mb": : :"memory") 5 #define rmb() __asm__ __volatile__("mb": : :"memory") 6 #define wmb() __asm__ __volatile__("wmb": : :"memory")
|
/linux-4.4.14/arch/mips/ar7/ |
H A D | Makefile | 5 memory.o \
|
/linux-4.4.14/arch/m68k/mm/ |
H A D | Makefile | 2 # Makefile for the linux m68k-specific parts of the memory manager. 8 obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o hwtest.o 10 obj-$(CONFIG_MMU_COLDFIRE) += kmap.o memory.o mcfmmu.o
|
/linux-4.4.14/arch/c6x/mm/ |
H A D | Makefile | 2 # Makefile for the linux c6x-specific parts of the memory manager.
|
/linux-4.4.14/arch/cris/arch-v10/mm/ |
H A D | Makefile | 2 # Makefile for the linux cris-specific parts of the memory manager.
|
/linux-4.4.14/arch/openrisc/mm/ |
H A D | Makefile | 2 # Makefile for the linux openrisc-specific parts of the memory manager.
|
/linux-4.4.14/arch/cris/mm/ |
H A D | Makefile | 2 # Makefile for the linux cris-specific parts of the memory manager.
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/ |
H A D | base.c | 26 #include <core/memory.h> 32 #define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory) 35 struct nvkm_memory memory; member in struct:nvkm_instobj 44 nvkm_instobj_target(struct nvkm_memory *memory) nvkm_instobj_target() argument 46 memory = nvkm_instobj(memory)->parent; nvkm_instobj_target() 47 return nvkm_memory_target(memory); nvkm_instobj_target() 51 nvkm_instobj_addr(struct nvkm_memory *memory) nvkm_instobj_addr() argument 53 memory = nvkm_instobj(memory)->parent; nvkm_instobj_addr() 54 return nvkm_memory_addr(memory); nvkm_instobj_addr() 58 nvkm_instobj_size(struct nvkm_memory *memory) nvkm_instobj_size() argument 60 memory = nvkm_instobj(memory)->parent; nvkm_instobj_size() 61 return nvkm_memory_size(memory); nvkm_instobj_size() 65 nvkm_instobj_release(struct nvkm_memory *memory) nvkm_instobj_release() argument 67 struct nvkm_instobj *iobj = nvkm_instobj(memory); nvkm_instobj_release() 72 nvkm_instobj_acquire(struct nvkm_memory *memory) nvkm_instobj_acquire() argument 74 return nvkm_instobj(memory)->map; nvkm_instobj_acquire() 78 nvkm_instobj_rd32(struct nvkm_memory *memory, u64 offset) nvkm_instobj_rd32() argument 80 return ioread32_native(nvkm_instobj(memory)->map + offset); nvkm_instobj_rd32() 84 nvkm_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) nvkm_instobj_wr32() argument 86 iowrite32_native(data, nvkm_instobj(memory)->map + offset); nvkm_instobj_wr32() 90 nvkm_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset) nvkm_instobj_map() argument 92 memory = nvkm_instobj(memory)->parent; nvkm_instobj_map() 93 nvkm_memory_map(memory, vma, offset); nvkm_instobj_map() 97 nvkm_instobj_dtor(struct nvkm_memory *memory) nvkm_instobj_dtor() argument 99 struct nvkm_instobj *iobj = nvkm_instobj(memory); nvkm_instobj_dtor() 121 nvkm_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm) nvkm_instobj_boot() argument 123 memory = nvkm_instobj(memory)->parent; nvkm_instobj_boot() 124 nvkm_memory_boot(memory, vm); nvkm_instobj_boot() 128 nvkm_instobj_release_slow(struct nvkm_memory *memory) nvkm_instobj_release_slow() argument 130 struct nvkm_instobj *iobj = nvkm_instobj(memory); nvkm_instobj_release_slow() 131 nvkm_instobj_release(memory); nvkm_instobj_release_slow() 136 nvkm_instobj_acquire_slow(struct nvkm_memory *memory) nvkm_instobj_acquire_slow() argument 138 struct nvkm_instobj *iobj = nvkm_instobj(memory); nvkm_instobj_acquire_slow() 141 memory->func = &nvkm_instobj_func; nvkm_instobj_acquire_slow() 146 nvkm_instobj_rd32_slow(struct nvkm_memory *memory, u64 offset) nvkm_instobj_rd32_slow() argument 148 struct nvkm_instobj *iobj = nvkm_instobj(memory); nvkm_instobj_rd32_slow() 153 nvkm_instobj_wr32_slow(struct nvkm_memory *memory, u64 offset, u32 data) nvkm_instobj_wr32_slow() argument 155 struct nvkm_instobj *iobj = nvkm_instobj(memory); nvkm_instobj_wr32_slow() 177 struct nvkm_memory *memory = NULL; nvkm_instobj_new() local 182 ret = imem->func->memory_new(imem, size, align, zero, &memory); nvkm_instobj_new() 192 nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory); nvkm_instobj_new() 193 iobj->parent = memory; nvkm_instobj_new() 198 memory = &iobj->memory; nvkm_instobj_new() 202 void __iomem *map = nvkm_kmap(memory); nvkm_instobj_new() 205 nvkm_wo32(memory, offset, 0x00000000); nvkm_instobj_new() 209 nvkm_done(memory); nvkm_instobj_new() 214 nvkm_memory_del(&memory); nvkm_instobj_new() 215 *pmemory = memory; nvkm_instobj_new() 247 struct nvkm_memory *memory = iobj->parent; nvkm_instmem_fini() local 248 u64 size = nvkm_memory_size(memory); nvkm_instmem_fini() 255 iobj->suspend[i / 4] = nvkm_ro32(memory, i); nvkm_instmem_fini() 280 struct nvkm_memory *memory = iobj->parent; nvkm_instmem_init() local 281 u64 size = nvkm_memory_size(memory); nvkm_instmem_init() 283 nvkm_wo32(memory, i, iobj->suspend[i / 4]); nvkm_instmem_init()
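The nvkm_instobj() accessor above is the usual container_of() wrapper pattern: a struct nvkm_memory is embedded in the larger struct nvkm_instobj, and the macro steps back from the member to the containing object before forwarding the call to the wrapped parent. A stripped-down, stand-alone illustration of the idiom (all names below are invented for the sketch, not nouveau's):

    #include <stddef.h>

    /* Classic container_of(): step back from a member to its container. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct memory  { int target; };
    struct instobj { struct memory *parent; struct memory memory; };

    #define to_instobj(p) container_of((p), struct instobj, memory)

    static int instobj_target(struct memory *m)
    {
            struct instobj *iobj = to_instobj(m);   /* recover the wrapper   */
            return iobj->parent->target;            /* forward to the parent */
    }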
|
H A D | nv50.c | 27 #include <core/memory.h> 42 #define nv50_instobj(p) container_of((p), struct nv50_instobj, memory) 45 struct nvkm_memory memory; member in struct:nv50_instobj 53 nv50_instobj_target(struct nvkm_memory *memory) nv50_instobj_target() argument 59 nv50_instobj_addr(struct nvkm_memory *memory) nv50_instobj_addr() argument 61 return nv50_instobj(memory)->mem->offset; nv50_instobj_addr() 65 nv50_instobj_size(struct nvkm_memory *memory) nv50_instobj_size() argument 67 return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT; nv50_instobj_size() 71 nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm) nv50_instobj_boot() argument 73 struct nv50_instobj *iobj = nv50_instobj(memory); nv50_instobj_boot() 76 u64 size = nvkm_memory_size(memory); nv50_instobj_boot() 87 nvkm_memory_map(memory, &iobj->bar, 0); nv50_instobj_boot() 99 nv50_instobj_release(struct nvkm_memory *memory) nv50_instobj_release() argument 101 struct nv50_instmem *imem = nv50_instobj(memory)->imem; nv50_instobj_release() 106 nv50_instobj_acquire(struct nvkm_memory *memory) nv50_instobj_acquire() argument 108 struct nv50_instobj *iobj = nv50_instobj(memory); nv50_instobj_acquire() 115 nvkm_memory_boot(memory, vm); nv50_instobj_acquire() 125 nv50_instobj_rd32(struct nvkm_memory *memory, u64 offset) nv50_instobj_rd32() argument 127 struct nv50_instobj *iobj = nv50_instobj(memory); nv50_instobj_rd32() 143 nv50_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) nv50_instobj_wr32() argument 145 struct nv50_instobj *iobj = nv50_instobj(memory); nv50_instobj_wr32() 159 nv50_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset) nv50_instobj_map() argument 161 struct nv50_instobj *iobj = nv50_instobj(memory); nv50_instobj_map() 166 nv50_instobj_dtor(struct nvkm_memory *memory) nv50_instobj_dtor() argument 168 struct nv50_instobj *iobj = nv50_instobj(memory); nv50_instobj_dtor() 203 *pmemory = &iobj->memory; nv50_instobj_new() 205 nvkm_memory_ctor(&nv50_instobj_func, &iobj->memory); nv50_instobj_new()
|
H A D | nv04.c | 27 #include <core/memory.h> 38 #define nv04_instobj(p) container_of((p), struct nv04_instobj, memory) 41 struct nvkm_memory memory; member in struct:nv04_instobj 47 nv04_instobj_target(struct nvkm_memory *memory) nv04_instobj_target() argument 53 nv04_instobj_addr(struct nvkm_memory *memory) nv04_instobj_addr() argument 55 return nv04_instobj(memory)->node->offset; nv04_instobj_addr() 59 nv04_instobj_size(struct nvkm_memory *memory) nv04_instobj_size() argument 61 return nv04_instobj(memory)->node->length; nv04_instobj_size() 65 nv04_instobj_acquire(struct nvkm_memory *memory) nv04_instobj_acquire() argument 67 struct nv04_instobj *iobj = nv04_instobj(memory); nv04_instobj_acquire() 73 nv04_instobj_release(struct nvkm_memory *memory) nv04_instobj_release() argument 78 nv04_instobj_rd32(struct nvkm_memory *memory, u64 offset) nv04_instobj_rd32() argument 80 struct nv04_instobj *iobj = nv04_instobj(memory); nv04_instobj_rd32() 86 nv04_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) nv04_instobj_wr32() argument 88 struct nv04_instobj *iobj = nv04_instobj(memory); nv04_instobj_wr32() 94 nv04_instobj_dtor(struct nvkm_memory *memory) nv04_instobj_dtor() argument 96 struct nv04_instobj *iobj = nv04_instobj(memory); nv04_instobj_dtor() 125 *pmemory = &iobj->memory; nv04_instobj_new() 127 nvkm_memory_ctor(&nv04_instobj_func, &iobj->memory); nv04_instobj_new()
|
H A D | nv40.c | 27 #include <core/memory.h> 40 #define nv40_instobj(p) container_of((p), struct nv40_instobj, memory) 43 struct nvkm_memory memory; member in struct:nv40_instobj 49 nv40_instobj_target(struct nvkm_memory *memory) nv40_instobj_target() argument 55 nv40_instobj_addr(struct nvkm_memory *memory) nv40_instobj_addr() argument 57 return nv40_instobj(memory)->node->offset; nv40_instobj_addr() 61 nv40_instobj_size(struct nvkm_memory *memory) nv40_instobj_size() argument 63 return nv40_instobj(memory)->node->length; nv40_instobj_size() 67 nv40_instobj_acquire(struct nvkm_memory *memory) nv40_instobj_acquire() argument 69 struct nv40_instobj *iobj = nv40_instobj(memory); nv40_instobj_acquire() 74 nv40_instobj_release(struct nvkm_memory *memory) nv40_instobj_release() argument 79 nv40_instobj_rd32(struct nvkm_memory *memory, u64 offset) nv40_instobj_rd32() argument 81 struct nv40_instobj *iobj = nv40_instobj(memory); nv40_instobj_rd32() 86 nv40_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) nv40_instobj_wr32() argument 88 struct nv40_instobj *iobj = nv40_instobj(memory); nv40_instobj_wr32() 93 nv40_instobj_dtor(struct nvkm_memory *memory) nv40_instobj_dtor() argument 95 struct nv40_instobj *iobj = nv40_instobj(memory); nv40_instobj_dtor() 124 *pmemory = &iobj->memory; nv40_instobj_new() 126 nvkm_memory_ctor(&nv40_instobj_func, &iobj->memory); nv40_instobj_new()
|
H A D | gk20a.c | 24 * GK20A does not have dedicated video memory, and to accurately represent this 26 * implementation must be done directly on top of system memory, while 30 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory 33 * contiguous memory. 46 #include <core/memory.h> 53 struct nvkm_memory memory; member in struct:gk20a_instobj 61 #define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory) 116 gk20a_instobj_target(struct nvkm_memory *memory) gk20a_instobj_target() argument 122 gk20a_instobj_addr(struct nvkm_memory *memory) gk20a_instobj_addr() argument 124 return gk20a_instobj(memory)->mem.offset; gk20a_instobj_addr() 128 gk20a_instobj_size(struct nvkm_memory *memory) gk20a_instobj_size() argument 130 return (u64)gk20a_instobj(memory)->mem.size << 12; gk20a_instobj_size() 134 gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory) gk20a_instobj_cpu_map_dma() argument 137 struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory); gk20a_instobj_cpu_map_dma() 139 int npages = nvkm_memory_size(memory) >> 12; gk20a_instobj_cpu_map_dma() 157 gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory) gk20a_instobj_cpu_map_iommu() argument 159 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); gk20a_instobj_cpu_map_iommu() 160 int npages = nvkm_memory_size(memory) >> 12; gk20a_instobj_cpu_map_iommu() 184 imem->vaddr_use -= nvkm_memory_size(&obj->memory); gk20a_instmem_vaddr_gc() 192 gk20a_instobj_acquire(struct nvkm_memory *memory) gk20a_instobj_acquire() argument 194 struct gk20a_instobj *node = gk20a_instobj(memory); gk20a_instobj_acquire() 197 const u64 size = nvkm_memory_size(memory); gk20a_instobj_acquire() 214 node->vaddr = imem->cpu_map(memory); gk20a_instobj_acquire() 233 gk20a_instobj_release(struct nvkm_memory *memory) gk20a_instobj_release() argument 235 struct gk20a_instobj *node = gk20a_instobj(memory); gk20a_instobj_release() 252 gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset) gk20a_instobj_rd32() argument 254 struct gk20a_instobj *node = gk20a_instobj(memory); gk20a_instobj_rd32() 260 gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data) gk20a_instobj_wr32() argument 262 struct gk20a_instobj *node = gk20a_instobj(memory); gk20a_instobj_wr32() 268 gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset) gk20a_instobj_map() argument 270 struct gk20a_instobj *node = gk20a_instobj(memory); gk20a_instobj_map() 298 imem->vaddr_use -= nvkm_memory_size(&node->memory); gk20a_instobj_dtor() 307 gk20a_instobj_dtor_dma(struct nvkm_memory *memory) gk20a_instobj_dtor_dma() argument 309 struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory); gk20a_instobj_dtor_dma() 326 gk20a_instobj_dtor_iommu(struct nvkm_memory *memory) gk20a_instobj_dtor_iommu() argument 328 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); gk20a_instobj_dtor_iommu() 401 nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory); gk20a_instobj_ctor_dma() 407 nvkm_error(subdev, "cannot allocate DMA memory\n"); gk20a_instobj_ctor_dma() 414 "memory not aligned as requested: %pad (0x%x)\n", gk20a_instobj_ctor_dma() 417 /* present memory for being mapped using small pages */ gk20a_instobj_ctor_dma() 451 nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory); gk20a_instobj_ctor_iommu() 453 /* Allocate backing memory */ gk20a_instobj_ctor_iommu() 548 *pmemory = node ? &node->memory : NULL; gk20a_instobj_new() 554 /* present memory for being mapped using small pages */ gk20a_instobj_new() 618 /* We will access the memory through our own mapping */ gk20a_instmem_new()
|
/linux-4.4.14/arch/alpha/mm/ |
H A D | Makefile | 2 # Makefile for the linux alpha-specific parts of the memory manager.
|
/linux-4.4.14/arch/hexagon/mm/ |
H A D | Makefile | 2 # Makefile for Hexagon memory management subsystem
|
H A D | init.c | 32 * that corresponds to the end of real or simulated platform memory. 45 /* indicate pfn's of high memory */ 64 * mem_init - initializes memory 68 * Calculates and displays memory available/used 82 * This can be moved to some more virtual-memory-specific mem_init() 91 * free_initmem - frees memory used by stuff declared with __init 101 * free_initrd_mem - frees... initrd memory. 102 * @start - start of init memory 103 * @end - end of init memory 105 * Apparently has to be passed the address of the initrd memory. 137 * give ZONE_NORMAL all the memory, including the big holes paging_init() 148 * Start of high memory area. Will probably need something more paging_init() 162 * Pick out the memory size. We look for mem=size, 188 * Set up boot memory allocator setup_arch_memory() 203 * memory allocation setup_arch_memory() 219 * higher than what we have memory for. setup_arch_memory() 263 * Free all the memory that wasn't taken up by the bootmap, the DMA setup_arch_memory() 271 * The bootmem allocator seemingly just lives to feed memory setup_arch_memory()
|
/linux-4.4.14/arch/arm/include/asm/ |
H A D | sparsemem.h | 4 #include <asm/memory.h> 10 * to address the last byte of memory. 13 * the maximum amount of memory in a section. 18 * Define these in your mach/memory.h.
|
H A D | barrier.h | 10 #define sev() __asm__ __volatile__ ("sev" : : : "memory") 11 #define wfe() __asm__ __volatile__ ("wfe" : : : "memory") 12 #define wfi() __asm__ __volatile__ ("wfi" : : : "memory") 16 #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory") 17 #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory") 18 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") 21 : : "r" (0) : "memory") 23 : : "r" (0) : "memory") 25 : : "r" (0) : "memory") 28 : : "r" (0) : "memory") 30 : : "r" (0) : "memory") 31 #define dmb(x) __asm__ __volatile__ ("" : : : "memory") 33 #define isb(x) __asm__ __volatile__ ("" : : : "memory") 35 : : "r" (0) : "memory") 36 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
|
H A D | irqflags.h | 31 : "=r" (flags) : : "memory", "cc"); arch_local_irq_save() 42 : "memory", "cc"); arch_local_irq_enable() 52 : "memory", "cc"); arch_local_irq_disable() 55 #define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc") 56 #define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc") 59 #define local_abt_enable() __asm__("cpsie a @ __sta" : : : "memory", "cc") 60 #define local_abt_disable() __asm__("cpsid a @ __cla" : : : "memory", "cc") 81 : "memory", "cc"); arch_local_irq_save() 98 : "memory", "cc"); arch_local_irq_enable() 114 : "memory", "cc"); arch_local_irq_disable() 129 : "memory", "cc"); \ 144 : "memory", "cc"); \ 160 : "=r" (flags) : : "memory", "cc"); arch_local_save_flags() 174 : "memory", "cc"); arch_local_irq_restore()
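These per-architecture helpers sit underneath the generic local_irq_save()/local_irq_restore() wrappers. A hedged sketch of the usual usage pattern (my_counter is a made-up piece of state, not something from this header):

    #include <linux/irqflags.h>

    static unsigned long my_counter;

    static void bump_counter(void)
    {
            unsigned long flags;

            local_irq_save(flags);          /* ends up in arch_local_irq_save()    */
            my_counter++;                   /* not interruptible on this CPU       */
            local_irq_restore(flags);       /* ends up in arch_local_irq_restore() */
    }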
|
/linux-4.4.14/tools/arch/arm64/include/asm/ |
H A D | barrier.h | 6 * f428ebd184c82a7914b2aa7e9f868918aaf7ea78 perf tools: Fix AAAAARGH64 memory barriers 12 #define mb() asm volatile("dmb ish" ::: "memory") 13 #define wmb() asm volatile("dmb ishst" ::: "memory") 14 #define rmb() asm volatile("dmb ishld" ::: "memory")
|
/linux-4.4.14/include/uapi/linux/ |
H A D | sysinfo.h | 10 __kernel_ulong_t totalram; /* Total usable main memory size */ 11 __kernel_ulong_t freeram; /* Available memory size */ 12 __kernel_ulong_t sharedram; /* Amount of shared memory */ 18 __kernel_ulong_t totalhigh; /* Total high memory size */ 19 __kernel_ulong_t freehigh; /* Available high memory size */
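These fields are filled in by the sysinfo(2) system call; the RAM counters are expressed in multiples of mem_unit bytes. A small user-space example that prints a few of them:

    #include <stdio.h>
    #include <sys/sysinfo.h>

    int main(void)
    {
            struct sysinfo si;

            if (sysinfo(&si))
                    return 1;
            /* Field values are multiples of si.mem_unit bytes. */
            printf("total RAM:  %llu MiB\n",
                   (unsigned long long)si.totalram * si.mem_unit >> 20);
            printf("free RAM:   %llu MiB\n",
                   (unsigned long long)si.freeram * si.mem_unit >> 20);
            printf("shared RAM: %llu MiB\n",
                   (unsigned long long)si.sharedram * si.mem_unit >> 20);
            return 0;
    }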
|
H A D | atm_eni.h | 19 /* printk memory map */
|
/linux-4.4.14/arch/parisc/include/asm/ |
H A D | irqflags.h | 10 asm volatile("ssm 0, %0" : "=r" (flags) : : "memory"); arch_local_save_flags() 16 asm volatile("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory"); arch_local_irq_disable() 21 asm volatile("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory"); arch_local_irq_enable() 27 asm volatile("rsm %1,%0" : "=r" (flags) : "i" (PSW_I) : "memory"); arch_local_irq_save() 33 asm volatile("mtsm %0" : : "r" (flags) : "memory"); arch_local_irq_restore()
|
H A D | special_insns.h | 16 : "r" (gr), "i" (cr) : "memory") 37 __asm__ __volatile__("mtsp %%r0,%0" : : "i" (cr) : "memory"); \ 41 : "r" (val), "i" (cr) : "memory"); }
|
/linux-4.4.14/tools/arch/powerpc/include/asm/ |
H A D | barrier.h | 11 * The sync instruction guarantees that all memory accesses initiated 13 * mechanisms that access memory). The eieio instruction is a barrier 15 * loads and stores to non-cacheable memory (e.g. I/O devices). 21 * *mb() variants without smp_ prefix must order all types of memory 25 #define mb() __asm__ __volatile__ ("sync" : : : "memory") 26 #define rmb() __asm__ __volatile__ ("sync" : : : "memory") 27 #define wmb() __asm__ __volatile__ ("sync" : : : "memory")
|
/linux-4.4.14/arch/ia64/include/uapi/asm/ |
H A D | gcc_intrin.h | 17 #define ia64_barrier() asm volatile ("":::"memory") 25 #define ia64_flushrs() asm volatile ("flushrs;;":::"memory") 27 #define ia64_loadrs() asm volatile ("loadrs;;":::"memory") 37 asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \ 42 "r"(val): "memory"); \ 47 "r"(val): "memory" ); \ 51 "r"(val): "memory"); \ 54 asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \ 103 asm volatile ("hint @pause" ::: "memory"); \ 206 asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ 212 asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ 218 asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ 224 asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ 230 asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ 239 : "memory"); \ 249 : "memory"); \ 260 : "memory"); \ 270 : "memory"); \ 279 : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \ 287 : "r" (ptr), "r" (x) : "memory"); \ 295 : "r" (ptr), "r" (x) : "memory"); \ 303 : "r" (ptr), "r" (x) : "memory"); \ 312 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ 321 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ 330 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ 340 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ 349 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ 358 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ 367 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ 377 "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ 381 #define ia64_mf() asm volatile ("mf" ::: "memory") 382 #define ia64_mfa() asm volatile ("mf.a" ::: "memory") 384 #define ia64_invala() asm volatile ("invala" ::: "memory") 393 #define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory") 394 #define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory"); 406 #define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory") 408 #define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory") 412 :: "r"(trnum), "r"(addr) : "memory") 415 :: "r"(trnum), "r"(addr) : "memory") 420 asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \ 425 asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory") 428 asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory") 431 asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory") 434 asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory") 437 asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory") 440 asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory"); 492 #define ia64_native_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") 495 #define ia64_sync_i() asm volatile (";; sync.i" ::: "memory") 497 #define ia64_native_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory") 498 #define ia64_native_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") 499 #define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory") 500 #define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory") 506 asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \ 512 asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \ 517 asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory") 520 asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory") 615 :: "r"((x)) : "p6", "p7", "memory"); \
|
H A D | setup.h | 9 __u64 efi_memmap; /* physical address of EFI memory map */ 10 __u64 efi_memmap_size; /* size of EFI memory map */ 11 __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */ 12 __u32 efi_memdesc_version; /* memory descriptor version */
|
/linux-4.4.14/arch/arm64/include/asm/ |
H A D | barrier.h | 23 #define sev() asm volatile("sev" : : : "memory") 24 #define wfe() asm volatile("wfe" : : : "memory") 25 #define wfi() asm volatile("wfi" : : : "memory") 27 #define isb() asm volatile("isb" : : : "memory") 28 #define dmb(opt) asm volatile("dmb " #opt : : : "memory") 29 #define dsb(opt) asm volatile("dsb " #opt : : : "memory") 48 : "=Q" (*p) : "r" (v) : "memory"); \ 52 : "=Q" (*p) : "r" (v) : "memory"); \ 56 : "=Q" (*p) : "r" (v) : "memory"); \ 60 : "=Q" (*p) : "r" (v) : "memory"); \ 73 : "Q" (*p) : "memory"); \ 78 : "Q" (*p) : "memory"); \ 83 : "Q" (*p) : "memory"); \ 88 : "Q" (*p) : "memory"); \
|
H A D | irqflags.h | 34 : "memory"); arch_local_irq_save() 44 : "memory"); arch_local_irq_enable() 53 : "memory"); arch_local_irq_disable() 56 #define local_fiq_enable() asm("msr daifclr, #1" : : : "memory") 57 #define local_fiq_disable() asm("msr daifset, #1" : : : "memory") 59 #define local_async_enable() asm("msr daifclr, #4" : : : "memory") 60 #define local_async_disable() asm("msr daifset, #4" : : : "memory") 72 : "memory"); arch_local_save_flags() 85 : "memory"); arch_local_irq_restore() 102 : "=r" (flags) : : "memory"); \ 110 : : "r" (flags) : "memory"); \ 113 #define local_dbg_enable() asm("msr daifclr, #8" : : : "memory") 114 #define local_dbg_disable() asm("msr daifset, #8" : : : "memory")
|
H A D | atomic_ll_sc.h | 82 ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__) 87 ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__)\ 88 ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__) 145 ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__) 150 ATOMIC64_OP_RETURN(_acquire,, a, , "memory", __VA_ARGS__) \ 151 ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__) 183 : "cc", "memory"); atomic64_dec_if_positive() 220 __CMPXCHG_CASE(w, b, acq_1, , a, , "memory") 221 __CMPXCHG_CASE(w, h, acq_2, , a, , "memory") 222 __CMPXCHG_CASE(w, , acq_4, , a, , "memory") 223 __CMPXCHG_CASE( , , acq_8, , a, , "memory") 224 __CMPXCHG_CASE(w, b, rel_1, , , l, "memory") 225 __CMPXCHG_CASE(w, h, rel_2, , , l, "memory") 226 __CMPXCHG_CASE(w, , rel_4, , , l, "memory") 227 __CMPXCHG_CASE( , , rel_8, , , l, "memory") 228 __CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory") 229 __CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory") 230 __CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory") 231 __CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory") 265 __CMPXCHG_DBL(_mb, dmb ish, l, "memory")
|
H A D | atomic_lse.h | 99 ATOMIC_OP_ADD_RETURN(_acquire, a, "memory") 100 ATOMIC_OP_ADD_RETURN(_release, l, "memory") 101 ATOMIC_OP_ADD_RETURN( , al, "memory") 162 ATOMIC_OP_SUB_RETURN(_acquire, a, "memory") 163 ATOMIC_OP_SUB_RETURN(_release, l, "memory") 164 ATOMIC_OP_SUB_RETURN( , al, "memory") 240 ATOMIC64_OP_ADD_RETURN(_acquire, a, "memory") 241 ATOMIC64_OP_ADD_RETURN(_release, l, "memory") 242 ATOMIC64_OP_ADD_RETURN( , al, "memory") 303 ATOMIC64_OP_SUB_RETURN(_acquire, a, "memory") 304 ATOMIC64_OP_SUB_RETURN(_release, l, "memory") 305 ATOMIC64_OP_SUB_RETURN( , al, "memory") 333 : "x30", "cc", "memory"); atomic64_dec_if_positive() 371 __CMPXCHG_CASE(w, b, acq_1, a, "memory") 372 __CMPXCHG_CASE(w, h, acq_2, a, "memory") 373 __CMPXCHG_CASE(w, , acq_4, a, "memory") 374 __CMPXCHG_CASE(x, , acq_8, a, "memory") 375 __CMPXCHG_CASE(w, b, rel_1, l, "memory") 376 __CMPXCHG_CASE(w, h, rel_2, l, "memory") 377 __CMPXCHG_CASE(w, , rel_4, l, "memory") 378 __CMPXCHG_CASE(x, , rel_8, l, "memory") 379 __CMPXCHG_CASE(w, b, mb_1, al, "memory") 380 __CMPXCHG_CASE(w, h, mb_2, al, "memory") 381 __CMPXCHG_CASE(w, , mb_4, al, "memory") 382 __CMPXCHG_CASE(x, , mb_8, al, "memory") 425 __CMPXCHG_DBL(_mb, al, "memory")
|
H A D | cmpxchg.h | 63 __XCHG_CASE(w, b, acq_1, , , a, a, , "memory") 64 __XCHG_CASE(w, h, acq_2, , , a, a, , "memory") 65 __XCHG_CASE(w, , acq_4, , , a, a, , "memory") 66 __XCHG_CASE( , , acq_8, , , a, a, , "memory") 67 __XCHG_CASE(w, b, rel_1, , , , , l, "memory") 68 __XCHG_CASE(w, h, rel_2, , , , , l, "memory") 69 __XCHG_CASE(w, , rel_4, , , , , l, "memory") 70 __XCHG_CASE( , , rel_8, , , , , l, "memory") 71 __XCHG_CASE(w, b, mb_1, dmb ish, nop, , a, l, "memory") 72 __XCHG_CASE(w, h, mb_2, dmb ish, nop, , a, l, "memory") 73 __XCHG_CASE(w, , mb_4, dmb ish, nop, , a, l, "memory") 74 __XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory")
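The acquire/release/full-barrier cases above back the generic cmpxchg()/atomic_cmpxchg() API. As an illustration of how such a primitive is typically consumed, here is a compare-and-swap retry loop; clamp_inc() is a made-up helper, not part of this header:

    #include <linux/atomic.h>
    #include <linux/types.h>

    static atomic_t refs = ATOMIC_INIT(0);

    /* Increment refs unless it has already reached max. */
    static bool clamp_inc(int max)
    {
            int old, new;

            do {
                    old = atomic_read(&refs);
                    if (old >= max)
                            return false;
                    new = old + 1;
            } while (atomic_cmpxchg(&refs, old, new) != old);

            return true;
    }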
|
/linux-4.4.14/arch/m68k/include/asm/ |
H A D | irqflags.h | 12 asm volatile ("movew %%sr,%0" : "=d" (flags) : : "memory"); arch_local_save_flags() 25 : "cc", "%d0", "memory"); arch_local_irq_disable() 27 asm volatile ("oriw #0x0700,%%sr" : : : "memory"); arch_local_irq_disable() 40 : "cc", "%d0", "memory"); arch_local_irq_enable() 49 : "memory"); arch_local_irq_enable() 62 asm volatile ("movew %0,%%sr" : : "d" (flags) : "memory"); arch_local_irq_restore()
|
H A D | page_offset.h | 1 /* This handles the memory map.. */
|
H A D | pci.h | 7 /* The PCI address space does equal the physical memory
|
/linux-4.4.14/arch/arm/mach-sa1100/include/mach/ |
H A D | memory.h | 2 * arch/arm/mach-sa1100/include/mach/memory.h 13 * Because of the wide memory address space between physical RAM banks on the 15 * our memory map representation. Assuming all memory nodes have equal access 16 * characteristics, we then have generic discontiguous memory support. 18 * The sparsemem banks are matched with the physical memory bank addresses
|
/linux-4.4.14/tools/arch/x86/include/asm/ |
H A D | barrier.h | 19 #define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") 20 #define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") 21 #define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") 23 #define mb() asm volatile("mfence":::"memory") 24 #define rmb() asm volatile("lfence":::"memory") 25 #define wmb() asm volatile("sfence" ::: "memory")
|
H A D | rmwcc.h | 10 : "memory" : cc_label); \ 29 : __VA_ARGS__ : "memory"); \
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/core/ |
H A D | memory.c | 24 #include <core/memory.h> 29 struct nvkm_memory *memory) nvkm_memory_ctor() 31 memory->func = func; nvkm_memory_ctor() 37 struct nvkm_memory *memory = *pmemory; nvkm_memory_del() local 38 if (memory && !WARN_ON(!memory->func)) { nvkm_memory_del() 39 if (memory->func->dtor) nvkm_memory_del() 40 *pmemory = memory->func->dtor(memory); nvkm_memory_del() 52 struct nvkm_memory *memory; nvkm_memory_new() local 58 ret = nvkm_instobj_new(imem, size, align, zero, &memory); nvkm_memory_new() 62 *pmemory = memory; nvkm_memory_new() 28 nvkm_memory_ctor(const struct nvkm_memory_func *func, struct nvkm_memory *memory) nvkm_memory_ctor() argument
|
/linux-4.4.14/arch/unicore32/include/asm/ |
H A D | barrier.h | 13 #define isb() __asm__ __volatile__ ("" : : : "memory") 14 #define dsb() __asm__ __volatile__ ("" : : : "memory") 15 #define dmb() __asm__ __volatile__ ("" : : : "memory")
|
/linux-4.4.14/lib/zlib_inflate/ |
H A D | Makefile | 2 # This is a modified version of zlib, which does all memory 8 # Decompression needs to be serialized for each memory 12 # any nasty situations wrt memory management, and that the
|
/linux-4.4.14/arch/sparc/include/asm/ |
H A D | barrier_64.h | 5 * #51. Essentially, if a memory barrier occurs soon after a mispredicted 9 * It used to be believed that the memory barrier had to be right in the 10 * delay slot, but a case has been traced recently wherein the memory barrier 22 * the memory barrier explicitly into a "branch always, predicted taken" 29 : : : "memory"); \ 32 /* The kernel always executes in TSO memory model these days, 34 * memory ordering than required by the specifications. 37 #define rmb() __asm__ __volatile__("":::"memory") 38 #define wmb() __asm__ __volatile__("":::"memory") 51 #define smp_mb() __asm__ __volatile__("":::"memory") 52 #define smp_rmb() __asm__ __volatile__("":::"memory") 53 #define smp_wmb() __asm__ __volatile__("":::"memory")
|
H A D | cacheflush.h | 5 #define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
|
H A D | spinlock_64.h | 46 : "memory"); arch_spin_lock() 57 : "memory"); arch_spin_trylock() 68 : "memory"); arch_spin_unlock() 90 : "memory"); arch_spin_lock_flags() 115 : "memory"); arch_read_lock() 134 : "memory"); arch_read_trylock() 152 : "memory"); arch_read_unlock() 177 : "memory"); arch_write_lock() 186 : "memory"); arch_write_unlock() 208 : "memory"); arch_write_trylock()
|
H A D | swift.h | 35 : "memory"); swift_inv_insn_tag() 44 : "memory"); swift_inv_data_tag() 79 : "memory"); swift_flush_page() 87 : "memory"); swift_flush_segment() 95 : "memory"); swift_flush_region() 103 : "memory"); swift_flush_context()
|
H A D | highmem.h | 2 * highmem.h: virtual kernel memory mappings for high memory 4 * Used in CONFIG_HIGHMEM systems for memory pages which 12 * up to 16 Terrabyte physical memory. With current x86 CPUs
|
H A D | irqflags_64.h | 35 : "memory" arch_local_irq_restore() 45 : "memory" arch_local_irq_disable() 55 : "memory" arch_local_irq_enable() 89 : "memory" arch_local_irq_save()
|
/linux-4.4.14/arch/tile/mm/ |
H A D | Makefile | 2 # Makefile for the linux tile-specific parts of the memory manager.
|
/linux-4.4.14/arch/xtensa/mm/ |
H A D | Makefile | 2 # Makefile for the Linux/Xtensa-specific parts of the memory manager.
|
/linux-4.4.14/arch/cris/include/arch-v10/arch/ |
H A D | irqflags.h | 9 asm volatile("move $ccr,%0" : "=rm" (flags) : : "memory"); arch_local_save_flags() 15 asm volatile("di" : : : "memory"); arch_local_irq_disable() 20 asm volatile("ei" : : : "memory"); arch_local_irq_enable() 32 asm volatile("move %0,$ccr" : : "rm" (flags) : "memory"); arch_local_irq_restore()
|
H A D | pgtable.h | 5 * Kernels own virtual memory area.
|
/linux-4.4.14/lib/ |
H A D | memory-notifier-error-inject.c | 3 #include <linux/memory.h> 9 MODULE_PARM_DESC(priority, "specify memory notifier priority"); 25 dir = notifier_err_inject_init("memory", notifier_err_inject_dir, err_inject_init() 46 MODULE_DESCRIPTION("memory notifier error injection module");
|
/linux-4.4.14/fs/dlm/ |
H A D | Makefile | 9 memory.o \
|
/linux-4.4.14/arch/cris/include/arch-v32/arch/ |
H A D | irqflags.h | 10 asm volatile("move $ccs,%0" : "=rm" (flags) : : "memory"); arch_local_save_flags() 16 asm volatile("di" : : : "memory"); arch_local_irq_disable() 21 asm volatile("ei" : : : "memory"); arch_local_irq_enable() 33 asm volatile("move %0,$ccs" : : "rm" (flags) : "memory"); arch_local_irq_restore()
|
H A D | pgtable.h | 4 /* Define the kernels virtual memory area. */
|
/linux-4.4.14/arch/frv/include/asm/ |
H A D | barrier.h | 1 /* FR-V CPU memory barrier definitions 17 #define mb() asm volatile ("membar" : : :"memory") 18 #define rmb() asm volatile ("membar" : : :"memory") 19 #define wmb() asm volatile ("membar" : : :"memory")
|
H A D | fpu.h | 9 #define kernel_fpu_end() do { asm volatile("bar":::"memory"); preempt_enable(); } while(0)
|
H A D | irqflags.h | 35 : "memory", "icc2" arch_local_irq_disable() 46 : "memory", "icc2" arch_local_irq_enable() 57 : "memory"); arch_local_save_flags() 83 : "memory", "icc2" arch_local_irq_restore() 110 : "memory"); \ 121 : "memory"); \ 130 : "memory"); \ 143 : "memory"); \ 152 : "memory"); \
|
H A D | highmem.h | 1 /* highmem.h: virtual kernel memory mappings for high memory 84 asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory"); \ 89 :: "r"(dampr) : "memory" \ 107 : : "r"(damlr), "r"(dampr) : "memory"); \ 126 asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory"); \ 128 asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory"); \ 133 asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
|
/linux-4.4.14/arch/blackfin/include/asm/ |
H A D | page_offset.h | 2 * This handles the memory map
|
H A D | l1layout.h | 2 * Defines a layout of L1 scratchpad memory that userspace can rely on. 18 memory, so that each process can access it at a fixed address. Used for 30 /* A pointer to the structure in memory. */
|
/linux-4.4.14/tools/arch/sparc/include/asm/ |
H A D | barrier_64.h | 7 * #51. Essentially, if a memory barrier occurs soon after a mispredicted 11 * It used to be believed that the memory barrier had to be right in the 12 * delay slot, but a case has been traced recently wherein the memory barrier 24 * the memory barrier explicitly into a "branch always, predicted taken" 31 : : : "memory"); \ 34 /* The kernel always executes in TSO memory model these days, 36 * memory ordering than required by the specifications. 39 #define rmb() __asm__ __volatile__("":::"memory") 40 #define wmb() __asm__ __volatile__("":::"memory")
|
/linux-4.4.14/tools/testing/selftests/user/ |
H A D | Makefile | 1 # Makefile for user memory selftests
|
/linux-4.4.14/include/drm/ |
H A D | drm_gem_cma_helper.h | 8 * struct drm_gem_cma_object - GEM object backed by CMA memory allocations 10 * @paddr: physical address of the backing memory 12 * @vaddr: kernel virtual address of the backing memory 19 /* For objects with DMA memory allocated by GEM CMA */ 32 /* create memory region for DRM framebuffer */ 37 /* create memory region for DRM framebuffer */ 42 /* map memory region for DRM framebuffer to user space */ 50 /* allocate physical memory */
|
/linux-4.4.14/arch/sh/include/asm/ |
H A D | bl_bit_32.h | 15 : "memory" set_bl_bit() 29 : "memory" clear_bl_bit()
|
H A D | cmpxchg-llsc.h | 19 : "t", "memory" xchg_u32() 40 : "t", "memory" xchg_u8() 65 : "t", "memory" __cmpxchg_u32()
|
H A D | sparsemem.h | 8 * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
|
H A D | bitops-llsc.h | 21 : "t", "memory" set_bit() 42 : "t", "memory" clear_bit() 63 : "t", "memory" change_bit() 86 : "t", "memory" test_and_set_bit() 112 : "t", "memory" test_and_clear_bit() 138 : "t", "memory" test_and_change_bit()
|
H A D | spinlock.h | 53 : "t", "memory" arch_spin_lock() 66 : "t", "memory" arch_spin_unlock() 84 : "t", "memory" arch_spin_trylock() 125 : "t", "memory" arch_read_lock() 141 : "t", "memory" arch_read_unlock() 159 : "t", "memory" arch_write_lock() 169 : "t", "memory" arch_write_unlock() 190 : "t", "memory" arch_read_trylock() 213 : "t", "memory" arch_write_trylock()
|
H A D | device.h | 12 /* allocate contiguous memory chunk and fill in struct resource */
|
/linux-4.4.14/arch/microblaze/include/asm/ |
H A D | irqflags.h | 24 : "memory"); arch_local_irq_save() 35 : "memory"); arch_local_irq_disable() 45 : "memory"); arch_local_irq_enable() 60 : "memory"); arch_local_irq_save() 74 : "memory"); arch_local_irq_disable() 87 : "memory"); arch_local_irq_enable() 99 : "memory"); arch_local_save_flags() 109 : "memory"); arch_local_irq_restore()
|
H A D | exceptions.h | 34 : "memory") 41 : "memory") 52 : "memory", "r12") 63 : "memory", "r12")
|
H A D | fixmap.h | 2 * fixmap.h: compile-time virtual memory allocation 36 * from the end of virtual memory (0xfffff000) backwards. 41 * these 'compile-time allocated' memory buffers are 44 * physical memory with fixmap indices.
|
/linux-4.4.14/tools/arch/ia64/include/asm/ |
H A D | barrier.h | 19 * Macros to force memory ordering. In these descriptions, "previous" 21 * architecturally visible effects of a memory access have occurred 22 * (at a minimum, this means the memory has been read or written). 24 * wmb(): Guarantees that all preceding stores to memory- 29 * mb(): wmb()/rmb() combo, i.e., all previous memory 35 * accesses to memory mapped I/O registers. For that, mf.a needs to 38 * sequential memory pages only. 42 #define ia64_mf() asm volatile ("mf" ::: "memory")
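The comment above describes the classic publish/consume use of these barriers: the producer orders its payload stores before the flag store, and the consumer orders the flag load before the payload loads. A stand-alone sketch, using a GCC full barrier in place of the per-arch wmb()/rmb() so it compiles anywhere:

    /* Stand-in for the per-arch wmb()/rmb() shown in these headers. */
    #define wmb()   __sync_synchronize()
    #define rmb()   __sync_synchronize()

    static int data;
    static volatile int ready;

    void producer(void)
    {
            data = 42;      /* store the payload ...              */
            wmb();          /* ... and make it visible ...        */
            ready = 1;      /* ... before publishing the flag     */
    }

    int consumer(void)
    {
            while (!ready)  /* wait for the flag ...              */
                    ;
            rmb();          /* ... order the flag read before ... */
            return data;    /* ... reading the payload            */
    }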
|
/linux-4.4.14/arch/blackfin/kernel/ |
H A D | fixed_code.S | 35 * Inputs: P0: memory address to use 37 * Output: R0: old contents of the memory address, zero extended. 48 * Inputs: P0: memory address to use 51 * The new value is stored if the contents of the memory 53 * Output: R0: old contents of the memory address. 67 * Inputs: P0: memory address to use 69 * Outputs: R0: new contents of the memory address. 70 * R1: previous contents of the memory address. 82 * Inputs: P0: memory address to use 84 * Outputs: R0: new contents of the memory address. 85 * R1: previous contents of the memory address. 97 * Inputs: P0: memory address to use 99 * Outputs: R0: new contents of the memory address. 100 * R1: previous contents of the memory address. 112 * Inputs: P0: memory address to use 114 * Outputs: R0: new contents of the memory address. 115 * R1: previous contents of the memory address. 127 * Inputs: P0: memory address to use 129 * Outputs: R0: new contents of the memory address. 130 * R1: previous contents of the memory address.
|
/linux-4.4.14/drivers/base/ |
H A D | dma-coherent.c | 2 * Coherent per-device memory handling. 141 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area 143 * @dev: device from which we allocate memory 144 * @size: size of requested memory area 150 * to support allocation from per-device coherent memory pools. 153 * generic memory areas, or !0 if dma_alloc_coherent should return @ret. 193 * per-device area, try to fall back to generic memory if the dma_alloc_from_coherent() 201 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool 202 * @dev: device from which the memory was allocated 206 * This checks whether the memory was allocated from the per-device 207 * coherent memory pool and if so, releases that memory. 209 * Returns 1 if we correctly released the memory, or 0 if 210 * dma_release_coherent() should proceed with releasing memory from 232 * dma_mmap_from_coherent() - try to mmap the memory allocated from 233 * per-device coherent memory pool to userspace 234 * @dev: device from which the memory was allocated 235 * @vma: vm_area for the userspace memory 237 * @size: size of the memory buffer allocated by dma_alloc_from_coherent 240 * This checks whether the memory was allocated from the per-device 241 * coherent memory pool and if so, maps that memory to the provided vma. 243 * Returns 1 if we correctly mapped the memory, or 0 if the caller should 244 * proceed with mapping memory from generic pools. 272 * Support for reserved memory regions defined in device tree 287 pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", rmem_dma_device_init() 316 pr_err("Reserved memory: regions without no-map are not yet supported\n"); rmem_dma_setup() 322 pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n", rmem_dma_setup()
|
H A D | dma-contiguous.c | 43 * The size can be set in bytes or as a percentage of the total memory 82 for_each_memblock(memory, reg) cma_early_percent_memory() 99 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling 100 * @limit: End address of the reserved memory (optional, 0 for any). 102 * This function reserves memory from early allocator. It should be 105 * memory. 149 * @limit: End address of the reserved memory (optional, 0 for any). 153 * This function reserves memory from early allocator. It should be 156 * memory. This function allows to create custom reserved areas for specific 172 /* Architecture specific contiguous memory fixup. */ dma_contiguous_reserve_area() 185 * This function allocates memory buffer for specified device. It uses 186 * device specific contiguous memory area if available or the default 205 * This function releases memory allocated by dma_alloc_from_contiguous(). 216 * Support for reserved memory regions defined in device tree 256 pr_err("Reserved memory: incorrect alignment of CMA region\n"); rmem_cma_setup() 262 pr_err("Reserved memory: unable to setup CMA region\n"); rmem_cma_setup() 265 /* Architecture specific contiguous memory fixup. */ rmem_cma_setup() 274 pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n", rmem_cma_setup()
|
H A D | dma-mapping.c | 54 * @dev: Device to allocate coherent memory for 63 * Pointer to allocated memory on success, NULL on failure. 93 * @dev: Device to free coherent memory for 95 * @vaddr: Virtual address of the memory to free 96 * @dma_handle: DMA handle of the memory to free 113 * @dev: Device to allocate non_coherent memory for 122 * Pointer to allocated memory on success, NULL on failure. 152 * @dev: Device to free noncoherent memory for 154 * @vaddr: Virtual address of the memory to free 155 * @dma_handle: DMA handle of the memory to free 179 * @dev: Device to declare coherent memory for 180 * @phys_addr: Physical address of coherent memory to be declared 181 * @device_addr: Device address of coherent memory to be declared 182 * @size: Size of coherent memory to be declared 213 * @dev: Device to release declared coherent memory for 244 * Create userspace mapping for the DMA-coherent memory.
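A rough sketch of a driver using the managed coherent allocator documented above; the buffer size and what is done with it are illustrative only:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    static int example_probe(struct device *dev)
    {
            dma_addr_t dma_handle;
            void *cpu_addr;

            cpu_addr = dmam_alloc_coherent(dev, 4096, &dma_handle, GFP_KERNEL);
            if (!cpu_addr)
                    return -ENOMEM;

            /*
             * The CPU accesses the buffer through cpu_addr, the device DMAs
             * to/from dma_handle; being "managed", the buffer is released
             * automatically when the device is unbound.
             */
            return 0;
    }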
|
/linux-4.4.14/arch/x86/kernel/ |
H A D | head.c | 10 * memory, and usually decreases the reported amount of 11 * conventional memory (int 0x12) too. This also contains a 19 * memory in the bottom megabyte is rarely a problem, as long 20 * as we have enough memory to install the trampoline. Using 21 * memory that is in use by the BIOS or by some DMA device 35 * end of conventional memory, we need to look at reserve_ebda_region() 38 * that the paravirt case can handle memory setup reserve_ebda_region() 44 /* end of low (conventional) memory */ reserve_ebda_region() 53 * reporting so, so just consider the memory above 0x9f000 reserve_ebda_region() 69 /* reserve all memory between lowmem and the 1MB mark */ reserve_ebda_region()
|
/linux-4.4.14/arch/x86/include/asm/ |
H A D | sync_bitops.h | 19 * sync_set_bit - Atomically set a bit in memory 34 : "memory"); sync_set_bit() 38 * sync_clear_bit - Clears a bit in memory 43 * not contain a memory barrier, so if it is used for locking purposes, 52 : "memory"); sync_clear_bit() 56 * sync_change_bit - Toggle a bit in memory 69 : "memory"); sync_change_bit() 78 * It also implies a memory barrier. 86 : "Ir" (nr) : "memory"); sync_test_and_set_bit() 96 * It also implies a memory barrier. 104 : "Ir" (nr) : "memory"); sync_test_and_clear_bit() 114 * It also implies a memory barrier. 122 : "Ir" (nr) : "memory"); sync_test_and_change_bit()
|
H A D | sparsemem.h | 6 * generic non-linear memory support: 8 * 1) we will not split memory into more chunks than will fit into the flags 13 * MAX_PHYSMEM_BITS 2^n: how much memory we can have in that space
|
H A D | barrier.h | 22 #define mb() asm volatile("mfence":::"memory") 23 #define rmb() asm volatile("lfence":::"memory") 24 #define wmb() asm volatile("sfence" ::: "memory") 52 * For this option x86 doesn't have a strong TSO memory 71 #else /* regular x86 TSO memory ordering */
|
H A D | page_32_types.h | 7 * This handles the memory map. 11 * amount of physical memory you can use to about 950MB. 13 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
|
H A D | rmwcc.h | 10 : "memory" : cc_label); \ 29 : __VA_ARGS__ : "memory"); \
|
H A D | kvm_emulate.h | 50 * These operations represent the instruction emulator's interface to memory. 51 * There are two categories of operation: those that act on ordinary memory 52 * regions (*_std), and those that act on memory regions known to require 55 * The emulator assumes that an instruction accesses only one 'emulated memory' 58 * stack operations are assumed never to access emulated memory. The emulator 60 * emulated memory, and assumes that the other operand accesses normal memory. 63 * 1. The emulator isn't very smart about emulated vs. standard memory. 64 * 'Emulated memory' access addresses should be checked for sanity. 65 * 'Normal memory' accesses may fault, and the caller must arrange to 103 * read_std: Read bytes of standard (non-emulated/special) memory. 106 * @val: [OUT] Value read from memory, zero-extended to 'u_long'. 107 * @bytes: [IN ] Number of bytes to read from memory. 115 * read_phys: Read bytes of standard (non-emulated/special) memory. 118 * @val: [OUT] Value read from memory. 119 * @bytes: [IN ] Number of bytes to read from memory. 125 * write_std: Write bytes of standard (non-emulated/special) memory. 128 * @val: [OUT] Value write to memory, zero-extended to 'u_long'. 129 * @bytes: [IN ] Number of bytes to write to memory. 135 * fetch: Read bytes of standard (non-emulated/special) memory. 138 * @val: [OUT] Value read from memory, zero-extended to 'u_long'. 139 * @bytes: [IN ] Number of bytes to read from memory. 146 * read_emulated: Read bytes from emulated/special memory area. 148 * @val: [OUT] Value read from memory, zero-extended to 'u_long'. 149 * @bytes: [IN ] Number of bytes to read from memory. 156 * write_emulated: Write bytes to emulated/special memory area. 158 * @val: [IN ] Value to write to memory (low-order bytes used as 160 * @bytes: [IN ] Number of bytes to write to memory. 169 * emulated/special memory area.
|
H A D | edac.h | 11 * Very carefully read and write to memory atomically so we edac_atomic_scrub()
|
/linux-4.4.14/fs/ntfs/ |
H A D | malloc.h | 2 * malloc.h - NTFS kernel memory handling. Part of the Linux-NTFS project. 30 * __ntfs_malloc - allocate memory in multiples of pages 36 * Allocates @size bytes of memory, rounded up to multiples of PAGE_SIZE and 37 * returns a pointer to the allocated memory. 39 * If there was insufficient memory to complete the request, return NULL. 56 * ntfs_malloc_nofs - allocate memory in multiples of pages 59 * Allocates @size bytes of memory, rounded up to multiples of PAGE_SIZE and 60 * returns a pointer to the allocated memory. 62 * If there was insufficient memory to complete the request, return NULL. 70 * ntfs_malloc_nofs_nofail - allocate memory in multiples of pages 73 * Allocates @size bytes of memory, rounded up to multiples of PAGE_SIZE and 74 * returns a pointer to the allocated memory. 79 * If there was insufficient memory to complete the request, return NULL.
|
/linux-4.4.14/arch/powerpc/include/asm/ |
H A D | barrier.h | 9 * The sync instruction guarantees that all memory accesses initiated 11 * mechanisms that access memory). The eieio instruction is a barrier 13 * loads and stores to non-cacheable memory (e.g. I/O devices). 21 * *mb() variants without smp_ prefix must order all types of memory 25 * For the smp_ barriers, ordering is for cacheable memory operations 33 #define mb() __asm__ __volatile__ ("sync" : : : "memory") 34 #define rmb() __asm__ __volatile__ ("sync" : : : "memory") 35 #define wmb() __asm__ __volatile__ ("sync" : : : "memory") 45 #define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") 47 #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") 54 #define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") 69 * x is a variable loaded from memory, this prevents following 73 asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
|
H A D | edac.h | 16 * ECC scrubbing. It reads memory and then writes back the original 17 * value, allowing the hardware to detect and correct memory errors. 26 /* Very carefully read and write to memory atomically edac_atomic_scrub() 36 : "cr0", "memory"); edac_atomic_scrub()
|
H A D | local.h | 34 : "cc", "memory"); local_add_return() 53 : "cc", "memory"); local_sub_return() 70 : "cc", "xer", "memory"); local_inc_return() 97 : "cc", "xer", "memory"); local_dec_return() 131 : "cc", "memory"); local_add_unless() 160 : "cc", "memory"); local_dec_if_positive()
|
H A D | mmzone.h | 14 * generic non-linear memory support: 16 * 1) we will not split memory into more chunks than will fit into the
|
H A D | fixmap.h | 2 * fixmap.h: compile-time virtual memory allocation 32 * from the end of virtual memory (0xfffff000) backwards. 37 * these 'compile-time allocated' memory buffers are 40 * physical memory with fixmap indices.
|
/linux-4.4.14/arch/metag/kernel/ |
H A D | tcm.c | 41 * tcm_alloc - allocate memory from a TCM pool 42 * @tag: tag of the pool to allocate memory from 46 * the specified tag. Returns the address of the allocated memory 66 * tcm_free - free a block of memory to a TCM pool 67 * @tag: tag of the pool to free memory to 68 * @addr: address of the memory to be freed 86 * @p: memory address to lookup the tag for 88 * Find the tag of the tcm memory region that contains the 90 * memory region could be found. 108 * tcm_add_region - add a memory region to TCM pool list 111 * Add a region of memory to the TCM pool list. Returns 0 on success. 119 pr_err("Failed to alloc memory for TCM pool!\n"); tcm_add_region() 141 pr_err("Failed to add memory to TCM pool!\n"); tcm_add_region()
|
/linux-4.4.14/include/linux/ |
H A D | genalloc.h | 3 * memory, for example, memory that is not managed by the regular 5 * memory, uncached memory etc. 14 * The lockless operation only works if there is enough memory 15 * available. If new memory is added to the pool a lock has to be 17 * that sufficient memory is preallocated. 53 * General purpose special memory pool descriptor. 67 * General purpose special memory pool chunk descriptor. 72 phys_addr_t phys_addr; /* physical starting address of memory chunk */ 73 unsigned long start_addr; /* start address of memory chunk */ 74 unsigned long end_addr; /* end address of memory chunk (inclusive) */ 75 unsigned long bits[0]; /* bitmap for allocating memory chunk */ 83 * gen_pool_add - add a new chunk of special memory to the pool 84 * @pool: pool to add new memory chunk to 85 * @addr: starting address of memory chunk to add to pool 86 * @size: size in bytes of the memory chunk to add to pool 90 * Add a new chunk of special memory to the specified pool.
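A hedged sketch of the gen_pool API described above, managing a small on-chip SRAM region; the address, size and allocation granularity are placeholders:

    #include <linux/errno.h>
    #include <linux/genalloc.h>

    static struct gen_pool *sram_pool;

    static int sram_pool_init(unsigned long sram_virt, size_t sram_size)
    {
            unsigned long chunk;

            /* order 5 => 32-byte allocation granules, no NUMA affinity */
            sram_pool = gen_pool_create(5, -1);
            if (!sram_pool)
                    return -ENOMEM;

            if (gen_pool_add(sram_pool, sram_virt, sram_size, -1))
                    return -ENOMEM;

            chunk = gen_pool_alloc(sram_pool, 256); /* carve out 256 bytes */
            if (!chunk)
                    return -ENOMEM;

            gen_pool_free(sram_pool, chunk, 256);
            return 0;
    }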
|
H A D | dqblk_v1.h | 2 * File with in-memory structures of old quota format
|
H A D | dma-contiguous.h | 21 * allocate big contiguous chunks of memory after the system has 27 * IO map support and require contiguous blocks of memory to 31 * Such devices often require big memory buffers (a full HD frame 33 * MB of memory), which makes mechanisms such as kmalloc() or 36 * At the same time, a solution where a big memory region is 37 * reserved for a device is suboptimal since often more memory is 38 * reserved then strictly required and, moreover, the memory is 41 * CMA tries to solve this issue by operating on memory regions 43 * can use the memory for pagecache and when device driver requests 90 * dma_declare_contiguous() - reserve area for contiguous memory handling 93 * @size: Size of the reserved memory. 94 * @base: Start address of the reserved memory (optional, 0 for any). 95 * @limit: End address of the reserved memory (optional, 0 for any). 97 * This function reserves memory for specified device. It should be
|
H A D | zpool.h | 2 * zpool memory storage api 6 * This is a common frontend for the zbud and zsmalloc memory 8 * store compressed memory. 23 * Note that this does not refer to memory protection, it 24 * refers to how the memory will be copied in/out if copying 26 * it copies the existing memory in on map, and copies the 27 * changed memory back out on unmap. Write-only does not copy 28 * in the memory and should only be used for initialization.
|
H A D | fsl-diu-fb.h | 70 * These are the fields of area descriptor(in DDR memory) for every plane 73 /* Word 0(32-bit) in DDR memory */ 86 /* Word 1(32-bit) in DDR memory */ 89 /* Word 2(32-bit) in DDR memory */ 97 /* Word 3(32-bit) in DDR memory */ 105 /* Word 4(32-bit) in DDR memory */ 113 /* Word 5(32-bit) in DDR memory */ 121 /* Word 6(32-bit) in DDR memory */ 127 /* Word 7(32-bit) in DDR memory */ 134 /* Word 8(32-bit) in DDR memory */ 137 /* Word 9(32-bit) in DDR memory, just for 64-bit aligned */
|
/linux-4.4.14/arch/powerpc/boot/ |
H A D | treeboot-iss4xx.c | 41 void *memory; iss_4xx_fixups() local 44 memory = finddevice("/memory"); iss_4xx_fixups() 45 if (!memory) iss_4xx_fixups() 46 fatal("Can't find memory node\n"); iss_4xx_fixups() 48 getprop(memory, "reg", reg, sizeof(reg)); iss_4xx_fixups() 50 /* If the device tree specifies the memory range, use it */ iss_4xx_fixups()
|
H A D | simpleboot.c | 50 /* Find the memory range */ platform_init() 52 "memory", sizeof("memory")); platform_init() 54 fatal("Cannot find memory node\n"); platform_init() 57 fatal("cannot get memory range\n"); platform_init() 59 /* Only interested in memory based at 0 */ platform_init() 80 /* Now we have the memory size; initialize the heap */ platform_init()
|
H A D | oflib.c | 109 * we claim the physical space in the /memory node and the virtual 115 static ihandle memory; variable 144 memory = of_call_prom("open", 1, 1, "/memory"); check_of_version() 145 if (memory == PROM_ERROR) { check_of_version() 146 memory = of_call_prom("open", 1, 1, "/memory@0"); check_of_version() 147 if (memory == PROM_ERROR) { check_of_version() 148 printf("no memory node\n"); check_of_version() 167 ret = of_call_prom_ret("call-method", 5, 2, &result, "claim", memory, of_claim() 195 fatal("Can't allocate memory for kernel image!\n\r"); of_vmlinux_alloc()
|
/linux-4.4.14/arch/m32r/include/asm/ |
H A D | irqflags.h | 27 : : : "memory"); arch_local_irq_disable() 38 : "cbit", "memory"); arch_local_irq_disable() 47 : : : "memory"); arch_local_irq_enable() 56 : "cbit", "memory"); arch_local_irq_enable() 70 : "memory"); arch_local_irq_save() 81 : "cbit", "memory"); arch_local_irq_save() 91 : "cbit", "memory"); arch_local_irq_restore()
|
H A D | cmpxchg.h | 30 : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); __xchg() 36 : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); __xchg() 42 : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); __xchg() 51 : "memory" __xchg() 83 : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); __xchg_local() 89 : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); __xchg_local() 95 : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); __xchg_local() 130 : "cbit", "memory" __cmpxchg_u32() 161 : "cbit", "memory" __cmpxchg_local_u32()
|
/linux-4.4.14/arch/sh/kernel/ |
H A D | irq_32.c | 24 : "memory" arch_local_irq_restore() 37 : "memory" arch_local_irq_restore() 52 : "memory" arch_local_save_flags()
|
H A D | io.c | 17 * Copy data from IO memory space to "real" memory space. 57 : "r0", "r7", "t", "memory"); memcpy_fromio() 80 * Copy data from "real" memory space to IO memory space. 103 * "memset" on IO memory space.
|
/linux-4.4.14/arch/ia64/include/asm/ |
H A D | agp.h | 12 * To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate 13 * in coherent mode, which lets us map the AGP memory as normal (write-back) memory
|
H A D | barrier.h | 17 * Macros to force memory ordering. In these descriptions, "previous" 19 * architecturally visible effects of a memory access have occurred 20 * (at a minimum, this means the memory has been read or written). 22 * wmb(): Guarantees that all preceding stores to memory- 27 * mb(): wmb()/rmb() combo, i.e., all previous memory 33 * accesses to memory mapped I/O registers. For that, mf.a needs to 36 * sequential memory pages only.
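The wmb()/rmb() guarantees described above are normally used in pairs: the writer orders its data stores before a flag store, and the reader orders the flag load before the data loads. A minimal, architecture-neutral sketch with invented shared variables (real kernel code would usually also use the smp_*() variants together with READ_ONCE()/WRITE_ONCE()):

#include <asm/barrier.h>

static int shared_data;
static int data_ready;

/* Producer: make the data visible before the flag. */
static void publish(int value)
{
	shared_data = value;
	wmb();		/* order the data store before the flag store */
	data_ready = 1;
}

/* Consumer: observe the flag before looking at the data. */
static int consume(void)
{
	if (!data_ready)
		return -1;
	rmb();		/* order the flag read before the data read */
	return shared_data;
}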
|
H A D | shmparam.h | 5 * SHMLBA controls minimum alignment at which shared memory segments
|
H A D | sparsemem.h | 7 * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
|
/linux-4.4.14/tools/perf/util/ |
H A D | wrapper.c | 7 * There's no pack memory to release - but stay close to the Git 22 die("Out of memory, strdup failed"); xstrdup() 38 die("Out of memory, realloc failed"); xrealloc()
|
/linux-4.4.14/drivers/staging/octeon/ |
H A D | ethernet-mem.c | 55 char *memory; cvm_oct_free_hw_skbuff() local 58 memory = cvmx_fpa_alloc(pool); cvm_oct_free_hw_skbuff() 59 if (memory) { cvm_oct_free_hw_skbuff() 61 *(struct sk_buff **)(memory - sizeof(void *)); cvm_oct_free_hw_skbuff() 65 } while (memory); cvm_oct_free_hw_skbuff() 76 * cvm_oct_fill_hw_memory - fill a hardware pool with memory. 85 char *memory; cvm_oct_fill_hw_memory() local 91 * FPA memory must be 128 byte aligned. Since we are cvm_oct_fill_hw_memory() 93 * can feed it to kfree when the memory is returned to cvm_oct_fill_hw_memory() 100 memory = kmalloc(size + 256, GFP_ATOMIC); cvm_oct_fill_hw_memory() 101 if (unlikely(memory == NULL)) { cvm_oct_fill_hw_memory() 106 fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL); cvm_oct_fill_hw_memory() 107 *((char **)fpa - 1) = memory; cvm_oct_fill_hw_memory() 115 * cvm_oct_free_hw_memory - Free memory allocated by cvm_oct_fill_hw_memory 122 char *memory; cvm_oct_free_hw_memory() local 130 memory = *((char **)fpa - 1); cvm_oct_free_hw_memory() 131 kfree(memory); cvm_oct_free_hw_memory()
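The comments above describe how cvm_oct_fill_hw_memory() hands the FPA 128-byte-aligned buffers while stashing the original kmalloc() pointer just below the aligned block so it can later be passed to kfree(). A standalone sketch of that trick (the helper names are invented, not from this driver):

#include <linux/slab.h>

/* Hand out a 128-byte-aligned buffer while remembering the kmalloc() pointer. */
static void *alloc_aligned_128(size_t size, gfp_t gfp)
{
	char *memory, *aligned;

	memory = kmalloc(size + 256, gfp);
	if (!memory)
		return NULL;

	/* Skip past the stash slot, then round down to a 128-byte boundary. */
	aligned = (char *)(((unsigned long)memory + 256) & ~0x7fUL);

	/* Store the original pointer in the slot just below the buffer. */
	*((char **)aligned - 1) = memory;
	return aligned;
}

static void free_aligned_128(void *aligned)
{
	if (aligned)
		kfree(*((char **)aligned - 1));
}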
|
/linux-4.4.14/drivers/of/ |
H A D | of_reserved_mem.c | 2 * Device tree based initialization code for reserved memory. 63 pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n", early_init_dt_alloc_reserved_memory_arch() 78 pr_err("Reserved memory: not enough space for all defined regions.\n"); fdt_reserved_mem_save_node() 92 * res_mem_alloc_size() - allocate reserved memory described by 'size', 'align' 111 pr_err("Reserved memory: invalid size property in '%s' node.\n", __reserved_mem_alloc_size() 122 pr_err("Reserved memory: invalid alignment property in '%s' node.\n", __reserved_mem_alloc_size() 137 pr_err("Reserved memory: invalid alloc-ranges property in '%s', skipping node.\n", __reserved_mem_alloc_size() 152 pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n", __reserved_mem_alloc_size() 164 pr_debug("Reserved memory: allocated memory for '%s' node: base %pa, size %ld MiB\n", __reserved_mem_alloc_size() 169 pr_info("Reserved memory: failed to allocate memory for node '%s'\n", __reserved_mem_alloc_size() 184 * res_mem_init_node() - call region specific reserved memory init code 199 pr_info("Reserved memory: initialized node %s, compatible id %s\n", __reserved_mem_init_node() 241 pr_err("Reserved memory: OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n", __rmem_check_for_overlap() 249 * fdt_init_reserved_mem - allocate and init all saved reserved memory regions 293 * of_reserved_mem_device_init() - assign reserved memory region to given device 295 * This function assigns the memory region pointed to by the "memory-region" device tree 304 np = of_parse_phandle(dev->of_node, "memory-region", 0); of_reserved_mem_device_init() 316 dev_info(dev, "assigned reserved memory node %s\n", rmem->name); of_reserved_mem_device_init() 323 * of_reserved_mem_device_release() - release reserved memory device structures 325 * This function releases structures allocated for memory region handling for 333 np = of_parse_phandle(dev->of_node, "memory-region", 0); of_reserved_mem_device_release()
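A driver consumes such a region through the "memory-region" phandle mentioned above. A minimal, hypothetical probe/remove pair built on of_reserved_mem_device_init() and of_reserved_mem_device_release():

#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>

static int my_drv_probe(struct platform_device *pdev)
{
	int ret;

	/* Attach the region that the "memory-region" phandle points to. */
	ret = of_reserved_mem_device_init(&pdev->dev);
	if (ret)
		dev_warn(&pdev->dev, "no reserved memory region assigned\n");

	/* ... normal probing; for "shared-dma-pool" regions, DMA allocations
	 * for this device can now be served from the reserved area ... */
	return 0;
}

static int my_drv_remove(struct platform_device *pdev)
{
	of_reserved_mem_device_release(&pdev->dev);
	return 0;
}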
|
/linux-4.4.14/arch/unicore32/mm/ |
H A D | Makefile | 2 # Makefile for the linux unicore-specific parts of the memory manager.
|
/linux-4.4.14/arch/x86/include/uapi/asm/ |
H A D | ucontext.h | 5 * information in the memory layout pointed
|
H A D | e820.h | 11 * internal memory map tables to have room for these additional 17 * of three memory map entries per node is "enough" entries for 42 * ( Note that older platforms also used 6 for the same type of memory, 50 * if CONFIG_INTEL_TXT is enabled, memory of this type will be 52 * any memory that BIOS might alter over the S3 transition 59 __u64 addr; /* start of memory segment */ 60 __u64 size; /* size of memory segment */ 61 __u32 type; /* type of memory segment */
|
/linux-4.4.14/lib/zlib_deflate/ |
H A D | Makefile | 2 # This is a modified version of zlib, which does all memory
|
/linux-4.4.14/arch/mips/include/asm/ |
H A D | sparsemem.h | 7 * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
|
H A D | barrier.h | 26 : "memory") 40 : "memory") 43 # define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory") 64 : "memory") 99 # define smp_mb() __asm__ __volatile__("sync" : : :"memory") 100 # define smp_rmb() __asm__ __volatile__("sync" : : :"memory") 101 # define smp_wmb() __asm__ __volatile__("sync" : : :"memory") 118 #define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory") 126 ".set pop" : : : "memory")
|
H A D | highmem.h | 2 * highmem.h: virtual kernel memory mappings for high memory 4 * Used in CONFIG_HIGHMEM systems for memory pages which 12 * up to 16 Terabyte physical memory. With current x86 CPUs
|
H A D | kvm_para.h | 23 : "=r" (r) : "r" (n) : "memory" kvm_hypercall0() 40 : "=r" (r) : "r" (n), "r" (a0) : "memory" kvm_hypercall1() 59 : "=r" (r) : "r" (n), "r" (a0), "r" (a1) : "memory" kvm_hypercall2() 80 : "=r" (r) : "r" (n), "r" (a0), "r" (a1), "r" (a2) : "memory" kvm_hypercall3()
|
/linux-4.4.14/arch/s390/mm/ |
H A D | Makefile | 2 # Makefile for the linux s390-specific parts of the memory manager.
|
/linux-4.4.14/arch/metag/include/asm/ |
H A D | sparsemem.h | 7 * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
|
H A D | io.h | 23 : "memory"); __raw_readb() 34 : "memory"); __raw_readw() 45 : "memory"); __raw_readl() 56 : "memory"); __raw_readq() 67 : "memory"); __raw_writeb() 77 : "memory"); __raw_writew() 87 : "memory"); __raw_writel() 97 : "memory"); __raw_writeq() 107 * Despite being a 32bit architecture, Meta can do 64bit memory accesses 141 * ioremap - map bus memory into CPU space 142 * @offset: bus address of the memory 146 * make bus memory CPU accessible via the readb/readw/readl/writeb/
|
H A D | global_lock.h | 12 * so that the compiler cannot reorder memory accesses across the lock. 26 : "memory"); \ 36 * compiler barrier so that the compiler cannot reorder memory accesses across 48 : "memory"); \ 58 * compiler cannot reorder memory accesses across the lock. 72 : "memory"); \ 83 * the compiler cannot reorder memory accesses across the unlock. 97 : "memory"); \
|
H A D | hwthread.h | 19 * Each hardware thread's Control Unit registers are memory-mapped 22 * This helper function returns the memory address where "thread"'s
|
H A D | fixmap.h | 2 * fixmap.h: compile-time virtual memory allocation 27 * from the end of the consistent memory region backwards. 32 * these 'compile-time allocated' memory buffers are 35 * physical memory with fixmap indices.
|
/linux-4.4.14/arch/frv/mm/ |
H A D | Makefile | 2 # Makefile for the arch-specific parts of the memory manager.
|
/linux-4.4.14/arch/ia64/mm/ |
H A D | Makefile | 2 # Makefile for the ia64-specific parts of the memory manager.
|
/linux-4.4.14/arch/m32r/mm/ |
H A D | Makefile | 2 # Makefile for the Linux M32R-specific parts of the memory manager.
|
/linux-4.4.14/drivers/net/ethernet/cavium/liquidio/ |
H A D | octeon_mem_ops.h | 24 * \brief Host Driver: Routines used to read/write Octeon memory. 30 /** Read a 64-bit value from a BAR1 mapped core memory address. 37 * @return 64-bit value read from Core memory 41 /** Read a 32-bit value from a BAR1 mapped core memory address. 45 * @return 32-bit value read from Core memory 49 /** Write a 32-bit value to a BAR1 mapped core memory address. 59 /** Read multiple bytes from Octeon memory. 67 /** Write multiple bytes into Octeon memory.
|
/linux-4.4.14/drivers/clk/versatile/ |
H A D | clk-icst.h | 6 * @vco_offset: offset to the ICST VCO from the provided memory base 8 * memory base
|
/linux-4.4.14/arch/x86/boot/compressed/ |
H A D | string.c | 13 : "memory"); memcpy() 27 : "memory"); memcpy()
|
/linux-4.4.14/arch/mips/dec/prom/ |
H A D | memory.c | 2 * memory.c: memory initialisation code. 24 * Probe memory in 4MB chunks, waiting for an error to tell us we've fallen 25 * off the end of real memory. Only suitable for the 2100/3100's (PMAX). 57 * Use the REX prom calls to get hold of the memory bitmap, and thence 58 * determine memory size. 106 * Leave 128 KB reserved for Lance memory for prom_free_prom_memory() 117 free_init_pages("unused PROM memory", PAGE_SIZE, end); prom_free_prom_memory()
|
/linux-4.4.14/arch/arm/mach-ebsa110/include/mach/ |
H A D | memory.h | 2 * arch/arm/mach-ebsa110/include/mach/memory.h 13 * 21-Mar-1999 RMK Renamed to memory.h
|
/linux-4.4.14/arch/arc/include/asm/ |
H A D | barrier.h | 20 * - Ensures that selected memory operation issued before it will complete 21 * before any subsequent memory operation of same type 29 #define mb() asm volatile("dmb 3\n" : : : "memory") 30 #define rmb() asm volatile("dmb 1\n" : : : "memory") 31 #define wmb() asm volatile("dmb 2\n" : : : "memory") 43 #define mb() asm volatile("sync\n" : : : "memory")
|
/linux-4.4.14/drivers/acpi/ |
H A D | acpi_memhotplug.c | 20 * This driver fields notifications from firmware for memory add 21 * and remove operations and alerts the VM of the affected memory 26 #include <linux/memory.h> 31 #define ACPI_MEMORY_DEVICE_CLASS "memory" 71 unsigned short caching; /* memory cache attribute */ 72 unsigned short write_protect; /* memory read/write attribute */ 78 unsigned int state; /* State of the memory device */ 212 * Tell the VM there is more memory here... acpi_memory_enable_device() 214 * We don't have a memory-hot-add rollback function now. acpi_memory_enable_device() 215 * (i.e. memory-hot-remove function) acpi_memory_enable_device() 223 * If the memory block size is zero, please ignore it. acpi_memory_enable_device() 224 * Don't try to do the following memory hotplug flowchart. acpi_memory_enable_device() 234 * If the memory block has been used by the kernel, add_memory() acpi_memory_enable_device() 236 * means that this memory block is not used by the kernel. acpi_memory_enable_device() 261 * Sometimes the memory device will contain several memory blocks. acpi_memory_enable_device() 262 * When one memory block is hot-added to the system memory, it will acpi_memory_enable_device() 264 * Otherwise if the last memory block can't be hot-added to the system acpi_memory_enable_device() 265 * memory, it will fail and the memory device can't be bound with acpi_memory_enable_device() 369 acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory"); acpi_memory_hotplug_init()
|
/linux-4.4.14/arch/mips/mti-malta/ |
H A D | malta-dtshim.c | 22 /* determined physical memory size, not overridden by command line args */ 61 /* if a memory node already exists, leave it alone */ append_memory() 62 mem_off = fdt_path_offset(fdt, "/memory"); append_memory() 66 /* find memory size from the bootloader environment */ append_memory() 88 * the last word of physical memory. append_memory() 96 /* allow the user to override the usable memory */ append_memory() 109 /* append memory to the DT */ append_memory() 110 mem_off = fdt_add_subnode(fdt, root_off, "memory"); append_memory() 112 panic("Unable to add memory node to DT: %d", mem_off); append_memory() 114 err = fdt_setprop_string(fdt, mem_off, "device_type", "memory"); append_memory() 116 panic("Unable to set memory node device_type: %d", err); append_memory() 122 panic("Unable to set memory regs property: %d", err); append_memory() 125 err = fdt_setprop(fdt, mem_off, "linux,usable-memory", mem_array, append_memory() 128 panic("Unable to set linux,usable-memory property: %d", err); append_memory()
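The append_memory() flow above reduces to a handful of libfdt calls: skip the work if /memory already exists, add the node, then set its device_type and reg properties. A condensed sketch, assuming #address-cells = #size-cells = 2 and leaving out the bootloader-environment and command-line handling:

#include <linux/libfdt.h>
#include <linux/types.h>

static int add_memory_node(void *fdt, u64 base, u64 size)
{
	__be64 regs[2] = { cpu_to_fdt64(base), cpu_to_fdt64(size) };
	int root, off, err;

	/* If a memory node already exists, leave it alone. */
	if (fdt_path_offset(fdt, "/memory") >= 0)
		return 0;

	root = fdt_path_offset(fdt, "/");
	if (root < 0)
		return root;

	off = fdt_add_subnode(fdt, root, "memory");
	if (off < 0)
		return off;

	err = fdt_setprop_string(fdt, off, "device_type", "memory");
	if (err)
		return err;

	return fdt_setprop(fdt, off, "reg", regs, sizeof(regs));
}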
|
/linux-4.4.14/tools/testing/selftests/vm/ |
H A D | hugepage-shm.c | 4 * Example of using huge page memory in a user application using Sys V shared 5 * memory system calls. In this example the app is requesting 256MB of 6 * memory that is backed by huge pages. The application uses the flag 17 * Note: The default shared memory limit is quite low on many kernels, 22 * This will increase the maximum size per shared memory segment to 256MB. 24 * total amount of shared memory in pages. To set it to 16GB on a system 69 perror("Shared memory attach failure"); main()
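The selftest described above boils down to the classic SysV sequence with SHM_HUGETLB. A condensed, self-contained userspace sketch of that flow, with error handling trimmed to the essentials:

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_HUGETLB
#define SHM_HUGETLB 04000	/* from <linux/shm.h>, in case libc lacks it */
#endif

#define LENGTH (256UL * 1024 * 1024)	/* 256 MB, as in the selftest */

int main(void)
{
	int shmid;
	char *addr;

	/* Create a huge-page-backed SysV segment. */
	shmid = shmget(IPC_PRIVATE, LENGTH, IPC_CREAT | SHM_HUGETLB | 0600);
	if (shmid < 0) {
		perror("shmget");
		return 1;
	}

	addr = shmat(shmid, NULL, 0);
	if (addr == (char *)-1) {
		perror("Shared memory attach failure");
		shmctl(shmid, IPC_RMID, NULL);
		return 1;
	}

	memset(addr, 0, LENGTH);	/* touch the pages */

	shmdt(addr);
	shmctl(shmid, IPC_RMID, NULL);	/* mark the segment for destruction */
	return 0;
}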
|
/linux-4.4.14/arch/mips/include/asm/octeon/ |
H A D | cvmx-bootmem.h | 29 * Simple allocate only memory allocator. Used to allocate memory at 51 /* First bytes of each free physical block of memory contain this structure, 52 * which is used to maintain the free memory list. Since the bootloader is 69 * Structure for named memory blocks. Number of descriptors available 72 * structure must be naturally 64 bit aligned, as a single memory 122 /* address of named memory block descriptors */ 141 * Initialize the boot alloc memory structures. This is 144 * @mem_desc_ptr: Address of the free memory list 149 * Allocate a block of memory from the free list that was passed 151 * This is an allocate-only algorithm, so freeing memory is not possible. 156 * Returns pointer to block of memory, NULL on error 161 * Allocate a block of memory from the free list that was 164 * freeing memory is not possible. Allocation will fail if 165 * memory cannot be allocated at the specified address. 168 * @address: Physical address to allocate memory at. If this memory is not 171 * Returns pointer to block of memory, NULL on error 177 * Allocate a block of memory from the free list that was 180 * freeing memory is not possible. Allocation will fail if 181 * memory cannot be allocated in the requested range. 187 * Returns pointer to block of memory, NULL on error 203 * Allocate a block of memory from the free list that was passed 212 * Returns a pointer to block of memory, NULL on error 220 * Allocate a block of memory from the free list that was passed 226 * @address: Physical address to allocate memory at. If this 227 * memory is not available, the allocation fails. 231 * Returns a pointer to block of memory, NULL on error 239 * Allocate a block of memory from a specific range of the free list 249 * @align: Alignment of memory to be allocated. (must be a power of 2) 252 * Returns a pointer to block of memory, NULL on error 271 * Allocates a block of physical memory from the free list, at 299 * Allocates a named block of physical memory from the free list, at 326 * Finds a named memory block by name. 329 * @name: Name of memory block to find. If NULL pointer given, then 334 * Returns Pointer to memory block descriptor, NULL if not found. 335 * If NULL returned when name parameter is NULL, then no memory 359 * frees and initial population of the free memory list.
|
/linux-4.4.14/drivers/firmware/ |
H A D | memmap.c | 31 * Firmware map entry. Because firmware memory maps are flat and not 40 u64 start; /* start of the memory range */ 41 u64 end; /* end of the memory range (incl.) */ 42 const char *type; /* type of the memory range */ 86 /* Firmware memory map entries. */ 91 * For memory hotplug, there is no way to free memory map entries allocated 92 * by boot mem after the system is up. So when we hot-remove memory whose 94 * reuse it when the memory is hot-added again. 113 * the memory is hot-added again. The entry will be added to release_firmware_map_entry() 139 * @start: Start of the memory range. 140 * @end: End of the memory range (exclusive). 141 * @type: Type of the memory range. 215 * @start: Start of the memory range. 216 * @end: End of the memory range (exclusive). 217 * @type: Type of the memory range. 220 * This function is to find the memmap entey of a given memory range in a 243 * @start: Start of the memory range. 244 * @end: End of the memory range (exclusive). 245 * @type: Type of the memory range. 247 * This function is to find the memmap entey of a given memory range. 261 * @start: Start of the memory range. 262 * @end: End of the memory range (exclusive). 263 * @type: Type of the memory range. 279 * memory hotplug. 280 * @start: Start of the memory range. 281 * @end: End of the memory range (exclusive) 282 * @type: Type of the memory range. 284 * Adds a firmware mapping entry. This function is for memory hotplug, it is 288 * Return: 0 on success, or -ENOMEM if no memory could be allocated. 321 * @start: Start of the memory range. 322 * @end: End of the memory range. 323 * @type: Type of the memory range. 326 * for memory allocation. 330 * Return: 0 on success, or -ENOMEM if no memory could be allocated. 345 * @start: Start of the memory range. 346 * @end: End of the memory range. 347 * @type: Type of the memory range.
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
H A D | ipath_mr.c | 42 /* Fast memory region */ 55 * ipath_get_dma_mr - get a DMA memory region 56 * @pd: protection domain for this memory region 59 * Returns the memory region on success, otherwise returns an errno. 124 * ipath_reg_phys_mr - register a physical memory region 125 * @pd: protection domain for this memory region 130 * Returns the memory region on success, otherwise returns an errno. 175 * ipath_reg_user_mr - register a userspace memory region 176 * @pd: protection domain for this memory region 180 * @mr_access_flags: access flags for this memory region 183 * Returns the memory region on success, otherwise returns an errno. 247 * ipath_dereg_mr - unregister and free a memory region 248 * @ibmr: the memory region to free 275 * ipath_alloc_fmr - allocate a fast memory region 276 * @pd: the protection domain for this memory region 277 * @mr_access_flags: access flags for this memory region 278 * @fmr_attr: fast memory region attributes 280 * Returns the memory region on success, otherwise returns an errno. 338 * ipath_map_phys_fmr - set up a fast memory region 339 * @ibmfr: the fast memory region to set up 340 * @page_list: the list of pages to associate with the fast memory region 341 * @list_len: the number of pages to associate with the fast memory region 342 * @iova: the virtual address of the start of the fast memory region 386 * ipath_unmap_fmr - unmap fast memory regions 387 * @fmr_list: the list of fast memory regions to unmap 409 * ipath_dealloc_fmr - deallocate a fast memory region 410 * @ibfmr: the fast memory region to deallocate
|
/linux-4.4.14/arch/xtensa/include/asm/ |
H A D | spinlock.h | 49 : "memory"); arch_spin_lock() 65 : "memory"); arch_spin_trylock() 79 : "memory"); arch_spin_unlock() 114 : "memory"); arch_write_lock() 131 : "memory"); arch_write_trylock() 145 : "memory"); arch_write_unlock() 162 : "memory"); arch_read_lock() 182 : "memory"); arch_read_trylock() 199 : "memory"); arch_read_unlock()
|
H A D | irqflags.h | 36 : "=a" (flags), "=a" (tmp) :: "memory"); arch_local_irq_save() 42 : "=&a" (flags) : "a" (LOCKLEVEL) : "memory"); arch_local_irq_save() 46 : "=a" (flags) :: "memory"); arch_local_irq_save() 59 asm volatile("rsil %0, 0" : "=a" (flags) :: "memory"); arch_local_irq_enable() 65 :: "a" (flags) : "memory"); arch_local_irq_restore()
|
H A D | barrier.h | 12 #define mb() ({ __asm__ __volatile__("memw" : : : "memory"); })
|
/linux-4.4.14/arch/tile/include/hv/ |
H A D | drv_mshim_intf.h | 17 * Interface definitions for the Linux EDAC memory controller driver. 23 /** Number of memory controllers in the public API. */ 26 /** Memory info under each memory controller. */ 29 uint64_t mem_size; /**< Total memory size in bytes. */ 44 /** Read this offset to get the memory info per mshim. */
|
/linux-4.4.14/arch/sh/boards/mach-sdk7786/ |
H A D | sram.c | 39 pr_err("FPGA memory unmapped.\n"); fpga_sram_init() 44 * The memory itself occupies a 2KiB range at the top of the area fpga_sram_init() 55 pr_err("Failed remapping FPGA memory.\n"); fpga_sram_init() 59 pr_info("Adding %dKiB of FPGA memory at 0x%08lx-0x%08lx " fpga_sram_init() 65 pr_err("Failed adding memory\n"); fpga_sram_init()
|
/linux-4.4.14/arch/arm/kernel/ |
H A D | io.c | 41 * Copy data from IO memory space to "real" memory space. 57 * Copy data from "real" memory space to IO memory space. 73 * "memset" on IO memory space.
|
/linux-4.4.14/tools/perf/ |
H A D | perf-sys.h | 12 #define cpu_relax() asm volatile("rep; nop" ::: "memory"); 26 #define cpu_relax() asm volatile("rep; nop" ::: "memory"); 65 #define cpu_relax() asm volatile ("hint @pause" ::: "memory") 74 #define cpu_relax() asm volatile("yield" ::: "memory") 94 #define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory")
|
/linux-4.4.14/mm/ |
H A D | page_ext.c | 5 #include <linux/memory.h> 14 * This is the feature to manage memory for extended data per page. 22 * allocates memory for extended data per page in certain place rather than 23 * the struct page itself. This memory can be accessed by the accessor 25 * allocation of huge chunk of memory is needed or not. If not, it avoids 26 * allocating memory at all. With this advantage, we can include this feature 31 * memory allocation at boot-time. The other is optional, init callback, which 32 * is used to do proper initialization after memory is allocated. 34 * The need callback is used to decide whether extended memory allocation is 36 * boot and extra memory would be unnecessary. In this case, to avoid 37 * allocating huge chunk of memory, each client represents its need of 38 * extra memory through the need callback. If one of the need callbacks 39 * returns true, it means that someone needs extra memory so that 40 * page extension core should allocate memory for page extension. If 41 * none of the need callbacks return true, memory isn't needed at all in this boot 42 * and page extension core can skip allocating memory. As a result, 43 * no memory is wasted. 46 * is completely initialized. In a sparse memory system, extra memory is 48 * of memory for page extension isn't the same as the memmap for struct page. 114 * for the first time during bootup or memory hotplug. lookup_page_ext() 174 panic("Out of memory"); 188 * for the first time during bootup or memory hotplug. lookup_page_ext() 231 * and it does not point to the memory block allocated above, init_section_page_ext() 291 * In this case, "nid" already exists and contains valid memory. online_page_ext() 400 panic("Out of memory");
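A page_ext client as described above supplies two callbacks: need(), consulted at boot to decide whether the extended per-page memory should be allocated at all, and init(), run once that memory exists. A hypothetical client sketch (the feature knob is invented for illustration):

#include <linux/page_ext.h>
#include <linux/types.h>

static bool my_feature_enabled;

static bool my_ext_need(void)
{
	/* Only ask for the extra per-page memory when the feature is on. */
	return my_feature_enabled;
}

static void my_ext_init(void)
{
	/* Late initialization, run after the page_ext memory is allocated. */
}

struct page_ext_operations my_ext_ops = {
	.need = my_ext_need,
	.init = my_ext_init,
};

Note that in this tree a real client would additionally have to be listed in the static page_ext_ops[] array in mm/page_ext.c for the core to consult its callbacks.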
|
H A D | zpool.c | 2 * zpool memory storage api 6 * This is a common frontend for memory storage pool implementations. 7 * Typically, this is used to store compressed memory. 144 * used when allocating memory, if the implementation supports it. If the 175 pr_err("couldn't create zpool - out of memory\n"); zpool_create_pool() 239 * zpool_malloc() - Allocate memory 241 * @size The amount of memory to allocate. 242 * @gfp The GFP flags to use when allocating memory. 245 * This allocates the requested amount of memory from the pool. 246 * The gfp flags will be used when allocating memory, if the 261 * zpool_free() - Free previously allocated memory 262 * @pool The zpool that allocated the memory. 263 * @handle The handle to the memory to free. 265 * This frees previously allocated memory. This does not guarantee 266 * that the pool will actually free memory, only that the memory 285 * This attempts to shrink the actual memory size of the pool 303 * zpool_map_handle() - Map a previously allocated handle into memory 306 * @mm How the memory should be mapped 308 * This maps a previously allocated handle into memory. The @mm 309 * param indicates to the implementation how the memory will be 311 * implementation does not support it, the memory will be treated 317 * its operations on the mapped handle memory quickly and unmap 322 * Returns: A pointer to the handle's mapped memory area. 337 * will be undone here. The memory area returned from 360 MODULE_DESCRIPTION("Common API for compressed memory storage");
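Putting the calls documented above together, a typical store path allocates a handle, maps it write-only, copies the compressed data in and unmaps it again. A sketch assuming the pool has already been created (prototypes paraphrased from include/linux/zpool.h and worth double-checking there):

#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zpool.h>

static int store_compressed(struct zpool *pool, const void *buf, size_t len)
{
	unsigned long handle;
	void *dst;
	int ret;

	ret = zpool_malloc(pool, len, GFP_KERNEL, &handle);
	if (ret)
		return ret;

	/* Map write-only: no need to copy the old contents in. */
	dst = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
	memcpy(dst, buf, len);
	zpool_unmap_handle(pool, handle);

	return 0;
}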
|
/linux-4.4.14/drivers/gpu/drm/ |
H A D | drm_agpsupport.c | 186 * Allocate AGP memory. 195 * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it. 200 struct agp_memory *memory; drm_agp_alloc() local 211 if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) { drm_agp_alloc() 216 entry->handle = (unsigned long)memory->key + 1; drm_agp_alloc() 217 entry->memory = memory; drm_agp_alloc() 220 list_add(&entry->head, &dev->agp->memory); drm_agp_alloc() 223 request->physical = memory->physical; drm_agp_alloc() 239 * Search for the AGP memory entry associated with a handle. 242 * \param handle AGP memory handle. 245 * Walks through drm_agp_head::memory until finding a matching handle. 252 list_for_each_entry(entry, &dev->agp->memory, head) { drm_agp_lookup_entry() 260 * Unbind AGP memory from the GATT (ioctl). 268 * Verifies the AGP device is present and acquired, looks up the AGP memory 282 ret = drm_unbind_agp(entry->memory); drm_agp_unbind() 299 * Bind AGP memory into the GATT (ioctl) 307 * Verifies the AGP device is present and has been acquired and that no memory 308 * is currently bound into the GATT. Looks up the AGP memory entry and passes 324 if ((retcode = drm_bind_agp(entry->memory, page))) drm_agp_bind() 343 * Free AGP memory (ioctl). 352 * AGP memory entry. If the memory is currently bound, unbind it via 365 drm_unbind_agp(entry->memory); drm_agp_free() 369 drm_free_agp(entry->memory, entry->pages); drm_agp_free() 418 INIT_LIST_HEAD(&head->memory); drm_agp_init() 446 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { drm_agp_clear() 448 drm_unbind_agp(entry->memory); drm_agp_clear() 449 drm_free_agp(entry->memory, entry->pages); drm_agp_clear() 452 INIT_LIST_HEAD(&dev->agp->memory); drm_agp_clear() 462 * Binds a collection of pages into AGP memory at the given offset, returning 463 * the AGP memory structure containing them. 483 DRM_ERROR("Failed to allocate memory for %ld pages\n", drm_agp_bind_pages() 495 DRM_ERROR("Failed to bind AGP memory: %d\n", ret); drm_agp_bind_pages()
|
/linux-4.4.14/drivers/staging/xgifb/ |
H A D | vgatypes.h | 26 void __iomem *pjVideoMemoryAddress;/* base virtual memory address */ 27 /* of Linear VGA memory */ 30 memory on the board */
|
/linux-4.4.14/arch/tile/include/asm/ |
H A D | atomic.h | 69 * atomic_inc_return - increment memory and return 77 * atomic_dec_return - decrement memory and return 117 * atomic_xchg - atomically exchange contents of memory with a new value 119 * @i: integer value to store in memory 129 * atomic_cmpxchg - atomically exchange contents of memory if it matches 131 * @o: old value that memory should have 132 * @n: new value to write to memory if it matches 163 * atomic64_xchg - atomically exchange contents of memory with a new value 165 * @i: integer value to store in memory 175 * atomic64_cmpxchg - atomically exchange contents of memory if it matches 177 * @o: old value that memory should have 178 * @n: new value to write to memory if it matches
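atomic_cmpxchg() as documented above is the usual building block for read-modify-write loops that retry until no other CPU has changed the value in between. An illustrative helper (not from this tree) that adds to a counter only while it is non-negative:

#include <linux/atomic.h>
#include <linux/types.h>

static bool atomic_add_if_nonnegative(atomic_t *v, int a)
{
	int old, new;

	for (;;) {
		old = atomic_read(v);
		if (old < 0)
			return false;

		new = old + a;
		/* Succeeds only if nobody changed *v since atomic_read(). */
		if (atomic_cmpxchg(v, old, new) == old)
			return true;
	}
}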
|
/linux-4.4.14/arch/x86/lib/ |
H A D | string_32.c | 9 * for large memory blocks. But most of them are unlikely to be used on large 25 : "0" (src), "1" (dest) : "memory"); strcpy() 45 : "0" (src), "1" (dest), "2" (count) : "memory"); strncpy() 63 : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu) : "memory"); strcat() 87 : "memory"); strncat() 110 : "memory"); strcmp() 135 : "memory"); strncmp() 157 : "memory"); strchr() 172 : "memory"); strlen() 192 : "memory"); memchr() 209 : "memory"); memscan() 231 : "memory"); strnlen()
|
/linux-4.4.14/arch/x86/um/asm/ |
H A D | barrier.h | 26 #define mb() asm volatile("mfence" : : : "memory") 27 #define rmb() asm volatile("lfence" : : : "memory") 28 #define wmb() asm volatile("sfence" : : : "memory")
|
/linux-4.4.14/arch/alpha/include/asm/ |
H A D | xchg.h | 6 * except that local version do not have the expensive memory barrier. 13 * it must clobber "memory" (also for interrupts in UP). 35 : "r" ((long)m), "1" (val) : "memory"); ____xchg() 59 : "r" ((long)m), "1" (val) : "memory"); ____xchg() 79 : "rI" (val), "m" (*m) : "memory"); ____xchg() 99 : "rI" (val), "m" (*m) : "memory"); ____xchg() 130 * The memory barrier should be placed in SMP only when we actually 133 * we don't need any memory barrier as far I can tell. 158 : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); ____cmpxchg() 185 : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); ____cmpxchg() 208 : "r"((long) old), "r"(new), "m"(*m) : "memory"); ____cmpxchg() 231 : "r"((long) old), "r"(new), "m"(*m) : "memory"); ____cmpxchg()
|
H A D | barrier.h | 6 #define mb() __asm__ __volatile__("mb": : :"memory") 7 #define rmb() __asm__ __volatile__("mb": : :"memory") 8 #define wmb() __asm__ __volatile__("wmb": : :"memory") 14 * No data-dependent reads from memory-like regions are ever reordered 16 * to access memory (but not necessarily other CPUs' caches) before any 61 #define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
|
H A D | spinlock.h | 47 : "m"(lock->lock) : "memory"); arch_spin_lock() 84 : "m" (*lock) : "memory"); arch_read_lock() 104 : "m" (*lock) : "memory"); arch_write_lock() 124 : "m" (*lock) : "memory"); arch_read_trylock() 146 : "m" (*lock) : "memory"); arch_write_trylock() 164 : "m" (*lock) : "memory"); arch_read_unlock()
|
/linux-4.4.14/arch/s390/include/asm/ |
H A D | irqflags.h | 17 : "=Q" (__mask) : "i" (__or) : "memory"); \ 27 : "=Q" (__mask) : "i" (__and) : "memory"); \ 34 asm volatile("ssm %0" : : "Q" (flags) : "memory"); __arch_local_irq_ssm()
|
H A D | module.h | 20 /* Starting offset of got in the module core memory. */ 22 /* Starting offset of plt in the module core memory. */
|
H A D | dma.h | 8 * to DMA. It _is_ used for the s390 memory zone split at 2GB caused
|
/linux-4.4.14/arch/avr32/include/asm/ |
H A D | irqflags.h | 29 asm volatile("" : : : "memory", "cc"); arch_local_irq_restore() 34 asm volatile("ssrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory"); arch_local_irq_disable() 39 asm volatile("csrf %0" : : "n"(SYSREG_GM_OFFSET) : "memory"); arch_local_irq_enable()
|
H A D | cacheflush.h | 21 * writing anything back to memory. 31 : "memory"); invalidate_dcache_line() 36 * to memory. 43 : "memory"); clean_dcache_line() 48 * to memory and then invalidate it. 55 : "memory"); flush_dcache_line() 67 : "memory"); invalidate_icache_line() 82 #define flush_write_buffer() asm volatile("sync 0" : : : "memory")
|
H A D | module.h | 12 /* Starting offset of got in the module core memory. */
|
/linux-4.4.14/net/sunrpc/xprtrdma/ |
H A D | physical_ops.c | 6 /* No-op chunk preparation. All client memory is pre-registered. 9 * Physical registration is simple because all client memory is 12 * trusted not to abuse its access to client memory not involved 44 /* PHYSICAL memory registration conveys one page per chunk segment. 59 /* The client's physical memory is already exposed for 75 /* Unmap a memory region, but leave it registered.
|
/linux-4.4.14/arch/mips/include/asm/dec/ |
H A D | ioasic_ints.h | 30 #define IO_INR_SCC0A_TXERR 30 /* SCC0A transmit memory read error */ 35 #define IO_INR_ASC_MERR 17 /* ASC memory read error */ 36 #define IO_INR_LANCE_MERR 16 /* LANCE memory read error */ 40 #define IO_INR_SCC1A_TXERR 26 /* SCC1A transmit memory read error */ 50 #define IO_INR_AB_TXERR 26 /* ACCESS.bus xmit memory read error */ 56 #define IO_INR_ISDN_ERR 20 /* ISDN memory read/overrun error */
|
/linux-4.4.14/arch/hexagon/include/asm/ |
H A D | cmpxchg.h | 26 * __xchg - atomically exchange a register and a memory location 28 * @ptr: pointer to memory 46 " memw_locked(%1,P0) = %2;\n" /* store into memory */ __xchg() 50 : "memory", "p0" __xchg() 56 * Atomically swap the contents of a register with memory. Should be atomic 84 : "memory", "p0" \
|
/linux-4.4.14/arch/powerpc/mm/ |
H A D | numa.c | 183 * Returns the property linux,drconf-usable-memory if 187 static const __be32 *of_get_usable_memory(struct device_node *memory) of_get_usable_memory() argument 191 prop = of_get_property(memory, "linux,drconf-usable-memory", &len); of_get_usable_memory() 368 struct device_node *memory = NULL; get_n_mem_cells() local 370 memory = of_find_node_by_type(memory, "memory"); get_n_mem_cells() 371 if (!memory) get_n_mem_cells() 372 panic("numa.c: No memory nodes found!"); get_n_mem_cells() 374 *n_addr_cells = of_n_addr_cells(memory); get_n_mem_cells() 375 *n_size_cells = of_n_size_cells(memory); get_n_mem_cells() 376 of_node_put(memory); get_n_mem_cells() 391 * Read the next memblock list entry from the ibm,dynamic-memory property 410 * Retrieve and validate the ibm,dynamic-memory property of the device tree. 412 * The layout of the ibm,dynamic-memory property is a number N of memblock 416 static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm) of_get_drconf_memory() argument 421 prop = of_get_property(memory, "ibm,dynamic-memory", &len); of_get_drconf_memory() 438 * Retrieve and validate the ibm,lmb-size property for drconf memory 441 static u64 of_get_lmb_size(struct device_node *memory) of_get_lmb_size() argument 446 prop = of_get_property(memory, "ibm,lmb-size", &len); of_get_lmb_size() 461 * memory from the ibm,associativity-lookup-arrays property of the 469 static int of_get_assoc_arrays(struct device_node *memory, of_get_assoc_arrays() argument 475 prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len); of_get_assoc_arrays() 493 * This is like of_node_to_nid_single() for memory represented in the 494 * ibm,dynamic-reconfiguration-memory node. 611 * Check and possibly modify a memory region to enforce the memory limit. 613 * Returns the size the region should have to enforce the memory limit. 616 * discarded as it lies wholly above the memory limit. 624 * having memory holes below the limit. Also, in the case of numa_enforce_memory_limit() 639 * linux,drconf-usable-memory property 644 * For each lmb in ibm,dynamic-memory a corresponding read_usm_ranges() 645 * entry in linux,drconf-usable-memory property contains read_usm_ranges() 647 * read the counter from linux,drconf-usable-memory read_usm_ranges() 653 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory 656 static void __init parse_drconf_memory(struct device_node *memory) parse_drconf_memory() argument 664 n = of_get_drconf_memory(memory, &dm); parse_drconf_memory() 668 lmb_size = of_get_lmb_size(memory); parse_drconf_memory() 672 rc = of_get_assoc_arrays(memory, &aa); parse_drconf_memory() 677 usm = of_get_usable_memory(memory); parse_drconf_memory() 714 &memblock.memory, nid); parse_drconf_memory() 721 struct device_node *memory; parse_numa_properties() local 753 * cpus into nodes once the memory scan has discovered for_each_present_cpu() 763 for_each_node_by_type(memory, "memory") { 771 memcell_buf = of_get_property(memory, 772 "linux,usable-memory", &len); 774 memcell_buf = of_get_property(memory, "reg", &len); 786 * Assumption: either all memory nodes or none will 790 nid = of_node_to_nid_single(memory); 804 memblock_set_node(start, size, &memblock.memory, nid); 812 * ibm,dynamic-memory property in the 813 * ibm,dynamic-reconfiguration-memory node. 
815 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 816 if (memory) 817 parse_drconf_memory(memory); 835 for_each_memblock(memory, reg) { for_each_memblock() 842 &memblock.memory, nid); for_each_memblock() 921 /* Initialize NODE_DATA for a node on the local memory */ setup_node_data() 1036 * Find the node associated with a hot added memory section for 1037 * memory represented in the device tree by the property 1038 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory. 1040 static int hot_add_drconf_scn_to_nid(struct device_node *memory, hot_add_drconf_scn_to_nid() argument 1049 drconf_cell_cnt = of_get_drconf_memory(memory, &dm); hot_add_drconf_scn_to_nid() 1053 lmb_size = of_get_lmb_size(memory); hot_add_drconf_scn_to_nid() 1057 rc = of_get_assoc_arrays(memory, &aa); hot_add_drconf_scn_to_nid() 1084 * Find the node associated with a hot added memory section for memory 1085 * represented in the device tree as a node (i.e. memory@XXXX) for 1090 struct device_node *memory; hot_add_node_scn_to_nid() local 1093 for_each_node_by_type(memory, "memory") { hot_add_node_scn_to_nid() 1099 memcell_buf = of_get_property(memory, "reg", &len); hot_add_node_scn_to_nid() 1113 nid = of_node_to_nid_single(memory); hot_add_node_scn_to_nid() 1121 of_node_put(memory); hot_add_node_scn_to_nid() 1127 * Find the node associated with a hot added memory section. Section 1133 struct device_node *memory = NULL; hot_add_scn_to_nid() local 1139 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); hot_add_scn_to_nid() 1140 if (memory) { hot_add_scn_to_nid() 1141 nid = hot_add_drconf_scn_to_nid(memory, scn_addr); hot_add_scn_to_nid() 1142 of_node_put(memory); hot_add_scn_to_nid() 1166 struct device_node *memory = NULL; hot_add_drconf_memory_max() local 1171 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); hot_add_drconf_memory_max() 1172 if (memory) { hot_add_drconf_memory_max() 1173 drconf_cell_cnt = of_get_drconf_memory(memory, &dm); hot_add_drconf_memory_max() 1174 lmb_size = of_get_lmb_size(memory); hot_add_drconf_memory_max() 1175 of_node_put(memory); hot_add_drconf_memory_max() 1181 * memory_hotplug_max - return max address of memory that may be added 1183 * This is currently only used on systems that support drconfig memory
|
H A D | highmem.c | 2 * highmem.c: virtual kernel memory mappings for high memory 6 * Used in CONFIG_HIGHMEM systems for memory pages which 14 * up to 16 Terabyte physical memory. With current x86 CPUs
|
/linux-4.4.14/arch/mips/fw/arc/ |
H A D | memory.c | 2 * memory.c: PROM library functions for acquiring/using memory descriptors 9 * PROM library functions for acquiring/using memory descriptors given to us 11 * because on some machines like SGI IP27 the ARC memory configuration data 30 * For ARC firmware memory functions the unit of measuring memory is always 31 * a 4k page of memory 157 free_init_pages("prom memory", prom_free_prom_memory()
|
/linux-4.4.14/arch/arm/mm/ |
H A D | iomap.c | 4 * Map IO port and PCI memory spaces so that {read,write}[bwl] can 5 * be used to access this memory.
|
H A D | tcm.h | 4 * TCM memory handling for ARM systems
|
/linux-4.4.14/arch/microblaze/mm/ |
H A D | init.c | 43 * Initialize the bootmem system and give it all the memory we 114 /* We don't have holes in memory map */ paging_init() 126 /* Find main memory where is the kernel */ for_each_memblock() 127 for_each_memblock(memory, reg) { for_each_memblock() 142 panic("%s: Missing memory setting 0x%08x, size=0x%08x\n", 165 /* memory start is from the kernel end (aligned) to higher addr */ 181 * for 4GB of memory, using 4kB pages), plus 1 page 189 for_each_memblock(memory, reg) { for_each_memblock() 196 &memblock.memory, 0); for_each_memblock() 199 /* free bootmem is whole main memory */ 244 /* this will put all memory onto the freelists */ mem_init() 252 pr_info("Kernel virtual memory layout:\n"); mem_init() 292 memblock.memory.regions[0].size = memory_size; mm_cmdline_setup() 320 * MMU_init sets up the basic memory mappings for the kernel, 331 pr_emerg("Error memory count\n"); mmu_init() 335 if ((u32) memblock.memory.regions[0].size < 0x400000) { mmu_init() 340 if ((u32) memblock.memory.regions[0].size < kernel_tlb) { mmu_init() 341 pr_emerg("Kernel size is greater than memory node\n"); mmu_init() 345 /* Find main memory where the kernel is */ mmu_init() 346 memory_start = (u32) memblock.memory.regions[0].base; mmu_init() 347 lowmem_size = memory_size = (u32) memblock.memory.regions[0].size; mmu_init() 360 * memory. mmu_init() 368 /* Remove the init RAM disk from the available memory. */ mmu_init()
|
H A D | highmem.c | 2 * highmem.c: virtual kernel memory mappings for high memory 6 * Used in CONFIG_HIGHMEM systems for memory pages which 14 * up to 16 Terabyte physical memory. With current x86 CPUs
|
/linux-4.4.14/sound/synth/ |
H A D | util_mem.c | 4 * Generic memory management routines for soundcard memory allocation 29 MODULE_DESCRIPTION("Generic memory management routines for soundcard memory allocation"); 35 * create a new memory manager 53 * free a memory manager 70 * allocate a memory block (without mutex) 106 * create a new memory block with the given size 135 * allocate a memory block (with mutex) 162 * free a memory block (with mutex) 176 * return available memory size
|
/linux-4.4.14/drivers/media/v4l2-core/ |
H A D | videobuf-vmalloc.c | 5 * (i.e. the buffers are not linear in physical memory, but fragmented 94 allocated memory and this memory is mmapped. videobuf_vm_close() 95 In this case, memory should be freed, videobuf_vm_close() 96 in order to do memory unmap. videobuf_vm_close() 168 switch (vb->memory) { __videobuf_iolock() 170 dprintk(1, "%s memory method MMAP\n", __func__); __videobuf_iolock() 174 printk(KERN_ERR "memory is not alloced/mmapped.\n"); __videobuf_iolock() 181 dprintk(1, "%s memory method USERPTR\n", __func__); __videobuf_iolock() 211 /* Try to remap memory */ __videobuf_iolock() 222 dprintk(1, "%s memory method OVERLAY/unknown\n", __func__); __videobuf_iolock() 264 /* Try to remap memory */ __videobuf_mmap_mapper() 329 /* mmapped memory can't be freed here, otherwise mmapped region videobuf_vmalloc_free() 330 would be released, while still needed. In this case, the memory videobuf_vmalloc_free() 332 So, it should free memory only if the memory were allocated for videobuf_vmalloc_free() 335 if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr) videobuf_vmalloc_free()
|
H A D | videobuf-dma-contig.c | 5 * (i.e. the buffers must be linear in physical memory) 49 dev_err(dev, "memory alloc size %ld failed\n", mem->size); __videobuf_dc_alloc() 106 allocated memory and this memory is mmapped. videobuf_vm_close() 107 In this case, memory should be freed, videobuf_vm_close() 108 in order to do memory unmap. videobuf_vm_close() 151 * videobuf_dma_contig_user_get() - setup user space memory pointer 155 * This function validates and sets up a pointer to user space memory. 156 * Only physically contiguous pfn-mapped memory is accepted. 245 switch (vb->memory) { __videobuf_iolock() 247 dev_dbg(q->dev, "%s memory method MMAP\n", __func__); __videobuf_iolock() 251 dev_err(q->dev, "memory is not alloced/mmapped.\n"); __videobuf_iolock() 256 dev_dbg(q->dev, "%s memory method USERPTR\n", __func__); __videobuf_iolock() 262 /* allocate memory for the read() method */ __videobuf_iolock() 269 dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__); __videobuf_iolock() 305 /* Try to remap memory */ __videobuf_mmap_mapper() 382 /* mmapped memory can't be freed here, otherwise mmapped region videobuf_dma_contig_free() 383 would be released, while still needed. In this case, the memory videobuf_dma_contig_free() 385 So, it should free memory only if the memory were allocated for videobuf_dma_contig_free() 388 if (buf->memory != V4L2_MEMORY_USERPTR) videobuf_dma_contig_free()
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
H A D | mr.c | 56 /* Fast memory region */ 105 * hfi1_get_dma_mr - get a DMA memory region 106 * @pd: protection domain for this memory region 109 * Returns the memory region on success, otherwise returns an errno. 191 * hfi1_reg_phys_mr - register a physical memory region 192 * @pd: protection domain for this memory region 197 * Returns the memory region on success, otherwise returns an errno. 237 * hfi1_reg_user_mr - register a userspace memory region 238 * @pd: protection domain for this memory region 241 * @mr_access_flags: access flags for this memory region 244 * Returns the memory region on success, otherwise returns an errno. 309 * hfi1_dereg_mr - unregister and free a memory region 310 * @ibmr: the memory region to free 346 * Allocate a memory region usable with the 349 * Return the memory region on success, otherwise return an errno. 369 * hfi1_alloc_fmr - allocate a fast memory region 370 * @pd: the protection domain for this memory region 371 * @mr_access_flags: access flags for this memory region 372 * @fmr_attr: fast memory region attributes 374 * Returns the memory region on success, otherwise returns an errno. 424 * hfi1_map_phys_fmr - set up a fast memory region 425 * @ibmfr: the fast memory region to set up 426 * @page_list: the list of pages to associate with the fast memory region 427 * @list_len: the number of pages to associate with the fast memory region 428 * @iova: the virtual address of the start of the fast memory region 475 * hfi1_unmap_fmr - unmap fast memory regions 476 * @fmr_list: the list of fast memory regions to unmap 498 * hfi1_dealloc_fmr - deallocate a fast memory region 499 * @ibfmr: the fast memory region to deallocate
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
H A D | qib_mr.c | 39 /* Fast memory region */ 88 * qib_get_dma_mr - get a DMA memory region 89 * @pd: protection domain for this memory region 92 * Returns the memory region on success, otherwise returns an errno. 174 * qib_reg_phys_mr - register a physical memory region 175 * @pd: protection domain for this memory region 180 * Returns the memory region on success, otherwise returns an errno. 220 * qib_reg_user_mr - register a userspace memory region 221 * @pd: protection domain for this memory region 224 * @mr_access_flags: access flags for this memory region 227 * Returns the memory region on success, otherwise returns an errno. 292 * qib_dereg_mr - unregister and free a memory region 293 * @ibmr: the memory region to free 326 * Allocate a memory region usable with the 329 * Return the memory region on success, otherwise return an errno. 379 * qib_alloc_fmr - allocate a fast memory region 380 * @pd: the protection domain for this memory region 381 * @mr_access_flags: access flags for this memory region 382 * @fmr_attr: fast memory region attributes 384 * Returns the memory region on success, otherwise returns an errno. 434 * qib_map_phys_fmr - set up a fast memory region 435 * @ibmfr: the fast memory region to set up 436 * @page_list: the list of pages to associate with the fast memory region 437 * @list_len: the number of pages to associate with the fast memory region 438 * @iova: the virtual address of the start of the fast memory region 485 * qib_unmap_fmr - unmap fast memory regions 486 * @fmr_list: the list of fast memory regions to unmap 508 * qib_dealloc_fmr - deallocate a fast memory region 509 * @ibfmr: the fast memory region to deallocate
|
/linux-4.4.14/drivers/staging/sm750fb/ |
H A D | ddk750_chip.h | 49 * Speed of memory clock in MHz unit 51 * Others = the new memory clock 64 * 0 = Do not reset the memory controller 65 * 1 = Reset the memory controller
|
/linux-4.4.14/drivers/net/arcnet/ |
H A D | com9026.h | 11 #define COM9026_REG_RW_MEMDATA 12 /* Data port for IO-mapped memory */
|
/linux-4.4.14/arch/sparc/mm/ |
H A D | Makefile | 1 # Makefile for the linux Sparc-specific parts of the memory manager.
|
/linux-4.4.14/arch/sparc/prom/ |
H A D | Makefile | 9 lib-$(CONFIG_SPARC32) += memory.o
|
/linux-4.4.14/arch/um/kernel/ |
H A D | physmem.c | 53 "memory size>/4096\n"); map_memory() 60 * setup_physmem() - Setup physical memory for UML 61 * @start: Start address of the physical kernel memory, 63 * @reserve_end: end address of the physical kernel memory. 64 * @len: Length of total physical memory that should be mapped/made 68 * Creates an unlinked temporary file of size (len + highmem) and memory maps 71 * The offset is needed as the length of the total physical memory 72 * (len + highmem) includes the size of the memory used by the executable image, 76 * The memory mapped memory of the temporary file is used as backing memory 92 printf("Too little physical memory! Needed=%d, given=%d\n", setup_physmem() 102 printf("setup_physmem - mapping %ld bytes of memory at 0x%p " setup_physmem() 158 " This controls how much \"physical\" memory the kernel allocates\n" 161 " This is not related to the amount of memory in the host. It can\n" 170 " Configure <file> as an IO memory region named <name>.\n\n"
|
/linux-4.4.14/arch/c6x/include/asm/ |
H A D | uaccess.h | 42 : "memory"); __copy_from_user() 50 : "memory"); __copy_from_user() 78 : "memory"); __copy_to_user() 86 : "memory"); __copy_to_user()
|
/linux-4.4.14/arch/cris/arch-v32/mm/ |
H A D | l2cache.c | 21 /* Flush the tag memory */ l2cache_init()
|
/linux-4.4.14/include/media/ |
H A D | videobuf2-vmalloc.h | 2 * videobuf2-vmalloc.h - vmalloc memory allocator for videobuf2
|
/linux-4.4.14/include/asm-generic/bitops/ |
H A D | find.h | 6 * find_next_bit - find the next set bit in a memory region 20 * find_next_zero_bit - find the next cleared bit in a memory region 35 * find_first_bit - find the first set bit in a memory region 46 * find_first_zero_bit - find the first cleared bit in a memory region
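The find_first_bit()/find_next_bit() pair declared above is typically used to walk every set bit of a bitmap, as in this small made-up example:

#include <linux/bitops.h>
#include <linux/printk.h>

#define NR_SLOTS 64

/* Report every set bit in a 64-bit-wide bitmap of in-use slots. */
static void report_used_slots(const unsigned long *slots)
{
	unsigned int bit;

	for (bit = find_first_bit(slots, NR_SLOTS);
	     bit < NR_SLOTS;
	     bit = find_next_bit(slots, NR_SLOTS, bit + 1))
		pr_info("slot %u is in use\n", bit);
}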
|
/linux-4.4.14/include/linux/mtd/ |
H A D | pismo.h | 2 * PISMO memory driver - http://www.pismoworld.org/
|
/linux-4.4.14/arch/score/include/asm/ |
H A D | irqflags.h | 48 : "r8", "r9", "memory"); arch_local_irq_save() 68 : "r8", "r9", "memory"); arch_local_irq_restore() 86 : "r8", "memory"); arch_local_irq_enable() 105 : "r8", "memory"); arch_local_irq_disable()
|
/linux-4.4.14/arch/metag/mm/ |
H A D | Makefile | 2 # Makefile for the linux Meta-specific parts of the memory manager.
|
H A D | maccess.c | 2 * safe read and write memory routines callable while atomic 13 * precise. We override it here to avoid these things happening to memory mapped 14 * IO memory where they could have undesired effects. 39 if (pte & MMCU_ENTRY_WRC_BIT || /* write combined memory */ probe_kernel_write()
|
/linux-4.4.14/arch/mips/sgi-ip27/ |
H A D | Makefile | 6 ip27-klnuma.o ip27-memory.o ip27-nmi.o ip27-reset.o ip27-timer.o \
|
/linux-4.4.14/arch/ia64/include/asm/sn/ |
H A D | mspec.h | 44 * be a cached entry with the memory type bits all set. This address 46 * uncached via the memory special memory type. If any portion of the 48 * overwrite the uncached value in physical memory and lead to
|
/linux-4.4.14/arch/arm/mach-orion5x/ |
H A D | tsx09-common.h | 10 * QNAP TS-x09 Boards function to find Ethernet MAC address in flash memory
|
/linux-4.4.14/arch/arm/mach-s3c64xx/ |
H A D | regs-gpio-memport.h | 7 * S3C64XX - GPIO memory port register definitions
|
/linux-4.4.14/sound/pci/emu10k1/ |
H A D | Makefile | 7 irq.o memory.o voice.o emumpu401.o emupcm.o io.o \
|