Searched for refs:memory (results 1 – 200 of 2162), sorted by relevance

/linux-4.4.14/tools/testing/selftests/memory-hotplug/
Dmem-on-off-test.sh21 if ! ls $SYSFS/devices/system/memory/memory* > /dev/null 2>&1; then
22 echo $msg memory hotplug is not supported >&2
34 for memory in $SYSFS/devices/system/memory/memory*; do
35 if grep -q 1 $memory/removable &&
36 grep -q $state $memory/state; then
37 echo ${memory##/*/memory}
54 grep -q online $SYSFS/devices/system/memory/memory$1/state
59 grep -q offline $SYSFS/devices/system/memory/memory$1/state
64 echo online > $SYSFS/devices/system/memory/memory$1/state
69 echo offline > $SYSFS/devices/system/memory/memory$1/state
[all …]
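The selftest above drives the memory-hotplug sysfs interface directly; a minimal manual sketch of the same check, offline, online cycle (the block number memory32 is only an illustrative placeholder) looks like this:

    SYSFS=/sys
    # list blocks that are both removable and currently online
    for memory in $SYSFS/devices/system/memory/memory*; do
        grep -q 1 "$memory/removable" && grep -q online "$memory/state" &&
            echo "removable online block: ${memory##*/}"
    done
    # take one block offline, confirm its state, then bring it back online
    echo offline > $SYSFS/devices/system/memory/memory32/state
    cat $SYSFS/devices/system/memory/memory32/state
    echo online > $SYSFS/devices/system/memory/memory32/state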
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
Dbase.c32 #define nvkm_instobj(p) container_of((p), struct nvkm_instobj, memory)
35 struct nvkm_memory memory; member
44 nvkm_instobj_target(struct nvkm_memory *memory) in nvkm_instobj_target() argument
46 memory = nvkm_instobj(memory)->parent; in nvkm_instobj_target()
47 return nvkm_memory_target(memory); in nvkm_instobj_target()
51 nvkm_instobj_addr(struct nvkm_memory *memory) in nvkm_instobj_addr() argument
53 memory = nvkm_instobj(memory)->parent; in nvkm_instobj_addr()
54 return nvkm_memory_addr(memory); in nvkm_instobj_addr()
58 nvkm_instobj_size(struct nvkm_memory *memory) in nvkm_instobj_size() argument
60 memory = nvkm_instobj(memory)->parent; in nvkm_instobj_size()
[all …]
Dnv50.c42 #define nv50_instobj(p) container_of((p), struct nv50_instobj, memory)
45 struct nvkm_memory memory; member
53 nv50_instobj_target(struct nvkm_memory *memory) in nv50_instobj_target() argument
59 nv50_instobj_addr(struct nvkm_memory *memory) in nv50_instobj_addr() argument
61 return nv50_instobj(memory)->mem->offset; in nv50_instobj_addr()
65 nv50_instobj_size(struct nvkm_memory *memory) in nv50_instobj_size() argument
67 return (u64)nv50_instobj(memory)->mem->size << NVKM_RAM_MM_SHIFT; in nv50_instobj_size()
71 nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm) in nv50_instobj_boot() argument
73 struct nv50_instobj *iobj = nv50_instobj(memory); in nv50_instobj_boot()
76 u64 size = nvkm_memory_size(memory); in nv50_instobj_boot()
[all …]
Dnv04.c38 #define nv04_instobj(p) container_of((p), struct nv04_instobj, memory)
41 struct nvkm_memory memory; member
47 nv04_instobj_target(struct nvkm_memory *memory) in nv04_instobj_target() argument
53 nv04_instobj_addr(struct nvkm_memory *memory) in nv04_instobj_addr() argument
55 return nv04_instobj(memory)->node->offset; in nv04_instobj_addr()
59 nv04_instobj_size(struct nvkm_memory *memory) in nv04_instobj_size() argument
61 return nv04_instobj(memory)->node->length; in nv04_instobj_size()
65 nv04_instobj_acquire(struct nvkm_memory *memory) in nv04_instobj_acquire() argument
67 struct nv04_instobj *iobj = nv04_instobj(memory); in nv04_instobj_acquire()
73 nv04_instobj_release(struct nvkm_memory *memory) in nv04_instobj_release() argument
[all …]
Dnv40.c40 #define nv40_instobj(p) container_of((p), struct nv40_instobj, memory)
43 struct nvkm_memory memory; member
49 nv40_instobj_target(struct nvkm_memory *memory) in nv40_instobj_target() argument
55 nv40_instobj_addr(struct nvkm_memory *memory) in nv40_instobj_addr() argument
57 return nv40_instobj(memory)->node->offset; in nv40_instobj_addr()
61 nv40_instobj_size(struct nvkm_memory *memory) in nv40_instobj_size() argument
63 return nv40_instobj(memory)->node->length; in nv40_instobj_size()
67 nv40_instobj_acquire(struct nvkm_memory *memory) in nv40_instobj_acquire() argument
69 struct nv40_instobj *iobj = nv40_instobj(memory); in nv40_instobj_acquire()
74 nv40_instobj_release(struct nvkm_memory *memory) in nv40_instobj_release() argument
[all …]
Dgk20a.c53 struct nvkm_memory memory; member
61 #define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
116 gk20a_instobj_target(struct nvkm_memory *memory) in gk20a_instobj_target() argument
122 gk20a_instobj_addr(struct nvkm_memory *memory) in gk20a_instobj_addr() argument
124 return gk20a_instobj(memory)->mem.offset; in gk20a_instobj_addr()
128 gk20a_instobj_size(struct nvkm_memory *memory) in gk20a_instobj_size() argument
130 return (u64)gk20a_instobj(memory)->mem.size << 12; in gk20a_instobj_size()
134 gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory) in gk20a_instobj_cpu_map_dma() argument
137 struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory); in gk20a_instobj_cpu_map_dma()
139 int npages = nvkm_memory_size(memory) >> 12; in gk20a_instobj_cpu_map_dma()
[all …]
/linux-4.4.14/Documentation/
Dmemory-hotplug.txt6 Add description of notifier of memory hotplug Oct 11 2007
8 This document is about memory hotplug including how-to-use and current status.
13 1.1 purpose of memory hotplug
14 1.2. Phases of memory hotplug
17 3. sysfs files for memory hotplug
18 4. Physical memory hot-add phase
20 4.2 Notify memory hot-add event by hand
22 5.1. State of memory
23 5.2. How to online memory
24 6. Logical memory remove
[all …]
Dbus-virt-phys-mapping.txt12 (because all bus master devices see the physical memory mappings directly).
15 at memory addresses, and in this case we actually want the third, the
18 Essentially, the three ways of addressing memory are (this is "real memory",
22 0 is what the CPU sees when it drives zeroes on the memory bus.
28 - bus address. This is the address of memory as seen by OTHER devices,
30 addresses, with each device seeing memory in some device-specific way, but
33 external hardware sees the memory the same way.
37 because the memory and the devices share the same address space, and that is
41 CPU sees a memory map something like this (this is from memory):
43 0-2 GB "real memory"
[all …]
Dkmemleak.txt7 Kmemleak provides a way of detecting possible kernel memory leaks in a
12 Valgrind tool (memcheck --leak-check) to detect the memory leaks in
20 thread scans the memory every 10 minutes (by default) and prints the
22 the possible memory leaks:
27 To trigger an intermediate memory scan:
31 To clear the list of all current possible memory leaks:
48 scan=on - start the automatic memory scanning thread (default)
49 scan=off - stop the automatic memory scanning thread
50 scan=<secs> - set the automatic memory scanning period in seconds
52 scan - trigger a memory scan
[all …]
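The scan, clear and tuning operations listed above are issued through the kmemleak debugfs file; a minimal sketch, assuming CONFIG_DEBUG_KMEMLEAK is enabled and debugfs can be mounted:

    # mount debugfs if it is not already available
    mount -t debugfs nodev /sys/kernel/debug 2>/dev/null
    echo scan  > /sys/kernel/debug/kmemleak   # trigger an intermediate memory scan
    cat /sys/kernel/debug/kmemleak            # list suspected leaks, if any were found
    echo clear > /sys/kernel/debug/kmemleak   # clear the current list of suspects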
Dramoops.txt17 Ramoops uses a predefined memory area to store the dump. The start and size
18 and type of the memory area are set using three variables:
20 * "mem_size" for the size. The memory size will be rounded down to a
22 * "mem_type" to specifiy if the memory type (default is pgprot_writecombine).
28 memory to be mapped strongly ordered, and atomic operations on strongly ordered
29 memory are implementation defined, and won't work on many ARMs such as omaps.
31 The memory area is divided into "record_size" chunks (also rounded down to
41 Ramoops also supports software ECC protection of persistent memory regions.
51 For quick debugging, you can also reserve parts of memory during boot
52 and then use the reserved memory for ramoops. For example, assuming a machine
[all …]
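The mem_size/mem_type knobs above (together with mem_address and record_size) can be handed to ramoops either on the kernel command line or as module parameters; a sketch with purely illustrative values, which must point at RAM actually reserved for this purpose:

    # kernel command line variant: hold back RAM above 128M and hand part of it to ramoops
    #   mem=128M ramoops.mem_address=0x8000000 ramoops.mem_size=0x100000 ramoops.ecc=1
    # module variant with the same illustrative values
    modprobe ramoops mem_address=0x8000000 mem_size=0x100000 record_size=0x4000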
Dnumastat.txt8 numa_hit A process wanted to allocate memory from this node,
11 numa_miss A process wanted to allocate memory from another node,
12 but ended up with memory from this node.
15 but ended up with memory from another one.
17 local_node A process ran on this node and got memory from it.
19 other_node A process ran on this node and got memory from another node.
Dkasan.txt7 KernelAddressSANitizer (KASAN) is a dynamic memory error detector. It provides
11 KASAN uses compile-time instrumentation for checking every memory access,
30 Currently KASAN works only with the SLUB memory allocator.
123 the description of the accessed memory page.
125 In the last section the report shows memory state around the accessed address.
128 The state of each 8 aligned bytes of memory is encoded in one shadow byte.
131 of the corresponding memory region are accessible; number N (1 <= N <= 7) means
135 inaccessible memory like redzones or freed memory (see mm/kasan/kasan.h).
144 From a high level, our approach to memory error detection is similar to that
145 of kmemcheck: use shadow memory to record whether each byte of memory is safe
[all …]
DDMA-API.txt10 Part II describes extensions for supporting non-consistent memory
33 Consistent memory is memory for which a write by either the device or
37 devices to read that memory.)
39 This routine allocates a region of <size> bytes of consistent memory.
48 Note: consistent memory can be expensive on some platforms, and the
50 consolidate your requests for consistent memory as much as possible.
56 the returned memory, like GFP_DMA).
62 Wraps dma_alloc_coherent() and also zeroes the returned memory if the
69 Free a region of consistent memory you previously allocated. dev,
83 Many drivers need lots of small DMA-coherent memory regions for DMA
[all …]
Dunaligned-memory-access.txt5 when it comes to memory access. This document presents some details about
13 Unaligned memory accesses occur when you try to read N bytes of data starting
16 reading 4 bytes of data from address 0x10005 would be an unaligned memory
19 The above may seem a little vague, as memory access can happen in different
21 or write a number of bytes to or from memory (e.g. movb, movw, movl in x86
23 which will compile to multiple-byte memory access instructions, namely when
31 When accessing N bytes of memory, the base memory address must be evenly
38 of memory access. However, we must consider ALL supported architectures;
46 The effects of performing an unaligned memory access vary from architecture
50 - Some architectures are able to perform unaligned memory accesses
[all …]
Datomic_ops.txt38 proper implicit or explicit read memory barrier is needed before reading the
59 or explicit memory barrier is needed before the value set with the operation
69 implicit or explicit memory barrier is used after possible runtime
73 interface must take care of that with a proper implicit or explicit memory
83 or processor, and explicitly invoke the appropriate compiler and/or memory
150 and never changed later, so that memory barriers are not needed:
173 Don't even -think- about doing this without proper use of memory barriers,
192 require any explicit memory barriers. They need only perform the
205 include explicit memory barriers that are performed before and after
206 the operation. It must be done such that all memory operations before
[all …]
Dmemory-barriers.txt10 (*) Abstract memory access model.
15 (*) What are memory barriers?
17 - Varieties of memory barrier.
18 - What may not be assumed about memory barriers?
22 - Examples of memory barrier sequences.
23 - Read memory barriers vs load speculation.
29 - CPU memory barriers.
32 (*) Implicit kernel memory barriers.
41 - Locks vs memory accesses.
44 (*) Where are memory barriers needed?
[all …]
Dbad_memory.txt5 How to deal with bad memory e.g. reported by memtest86+ ?
10 1) Reinsert/swap the memory modules
12 2) Buy new modules (best!) or try to exchange the memory
34 Syntax to exclude a memory area (see kernel-parameters.txt for details):
Dflexible-arrays.txt5 Large contiguous memory allocations can be unreliable in the Linux kernel.
8 memory from vmalloc() must be mapped into a relatively small address space;
15 In many cases, the need for memory from vmalloc() can be eliminated by
21 reasonably well. Only single-page allocations are made, so memory
39 argument is passed directly to the internal memory allocation calls. With
40 the current code, using flags to ask for high memory is likely to lead to
57 the array was created). If any memory allocations must be performed, flags
63 memory allocator would be a bad thing. That can be avoided by using
65 trick is to ensure that any needed memory allocations are done before
71 This function will ensure that memory for the elements indexed in the range
[all …]
DSM501.txt38 The centralised memory allocation allows the driver to ensure that the
42 The primary issue with memory allocation is that of moving the video
44 occurs the memory footprint of the video subsystem changes.
46 Since video memory is difficult to move without changing the display
47 (unless sufficient contiguous memory can be provided for the old and new
48 modes simultaneously) the video driver fully utilises the memory area
50 of it. Any memory left over in the middle is used for the acceleration
Dnommu-mmap.txt5 The kernel has limited support for memory mapping under no-MMU conditions, such
6 as are used in uClinux environments. From the userspace point of view, memory
53 memory and any extraneous space beyond the EOF will be cleared
81 In the no-MMU case: The filesystem providing the memory-backed file
84 case, a shared-writable memory mapping will be possible. It will work
92 In the no-MMU case: As for memory backed regular files, but the
95 all its memory as a contiguous array upfront.
103 provides memory or quasi-memory that can be accessed directly. Examples
122 (*) The memory allocated by a request for an anonymous mapping will normally
139 However, for memory that isn't required to be precleared - such as that
[all …]
Ddell_rbu.txt8 update itself with the image downloaded in to the memory.
23 would place each packet in contiguous physical memory. The driver also
25 If the dell_rbu driver is unloaded all the allocated memory is freed.
43 copied to a single contiguous block of physical memory.
44 In case of packet mechanism the single memory can be broken in smaller chunks
45 of contiguous memory and the BIOS image is scattered in these packets.
47 By default the driver uses monolithic memory for the update type. This can be
65 the file and spreads it across the physical memory in contiguous packet_sized
82 memory allocated by the driver.
Dvolatile-considered-harmful.txt19 safe (spinlocks, mutexes, memory barriers, etc.) are designed to prevent
35 primitives act as memory barriers - they are explicitly written to do so -
38 spin_lock() call, since it acts as a memory barrier, will force it to
49 The volatile storage class was originally meant for memory-mapped I/O
52 accesses within a critical section. But, within the kernel, I/O memory
53 accesses are always done through accessor functions; accessing I/O memory
74 architectures where direct I/O memory access does work. Essentially,
78 - Inline assembly code which changes memory, but which has no other
89 - Pointers to data structures in coherent memory which might be modified
DDMA-attributes.txt11 to a memory region with the DMA_ATTR_WRITE_BARRIER attribute forces
17 the way from the DMA device to memory.
21 ready and available in memory. The DMA of the "completion indication"
22 could race with data DMA. Mapping the memory used for completion
49 consistent or non-consistent memory as it sees fit. By using this API,
51 necessary sync points for this memory in the driver.
102 also in physical memory.
/linux-4.4.14/Documentation/ABI/testing/
Dsysfs-devices-memory1 What: /sys/devices/system/memory
5 The /sys/devices/system/memory contains a snapshot of the
6 internal state of the kernel memory blocks. Files could be
9 Users: hotplug memory add/remove tools
12 What: /sys/devices/system/memory/memoryX/removable
16 The file /sys/devices/system/memory/memoryX/removable
17 indicates whether this memory block is removable or not.
19 identify removable sections of the memory before attempting
20 potentially expensive hot-remove memory operation
21 Users: hotplug memory remove tools
[all …]
Dsysfs-firmware-memmap5 On all platforms, the firmware provides a memory map which the
6 kernel reads. The resources from that memory map are registered
10 However, on most architectures that firmware-provided memory
12 the kernel merges that memory map with other information or
13 just because the user overwrites that memory map via command
16 kexec needs the raw firmware-provided memory map to setup the
18 kexec. Also, the raw memory map is useful for debugging. For
20 the raw memory map to userspace.
31 The maximum depends on the number of memory map entries provided
59 Following shell snippet can be used to display that memory
Dsysfs-firmware-efi-runtime-map5 that all efi memory ranges which have the runtime attribute
17 subdirectories are named with the number of the memory range:
27 attribute : The attributes of the memory range.
28 num_pages : The size of the memory range in pages.
29 phys_addr : The physical address of the memory range.
30 type : The type of the memory range.
31 virt_addr : The virtual address of the memory range.
Dsysfs-devices-edac5 counters for UE and CE errors on the given memory controller.
22 Description: This attribute file displays the type of memory controller
28 Description: This attribute file displays, in count of megabytes, of memory
29 that this memory controller manages.
35 errors that have occurred on this memory controller. If
43 occurred on this memory controller with no information as to
50 errors that have occurred on this memory controller. This
60 have occurred on this memory controller wherewith no
63 to indicate which slot the failing memory is in. This count
69 Description: Read/Write attribute file that controls memory scrubbing.
[all …]
Dsysfs-memory-page-offline1 What: /sys/devices/system/memory/soft_offline_page
6 Soft-offline the memory page containing the physical address
23 What: /sys/devices/system/memory/hard_offline_page
28 Hard-offline the memory page containing the physical
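Both attributes above take the physical address of the page to act on; a minimal sketch (the address is an arbitrary example, and hard-offlining additionally requires CONFIG_MEMORY_FAILURE):

    # soft-offline: migrate the page's contents away, then remove it from use
    echo 0x2fd8000 > /sys/devices/system/memory/soft_offline_page
    # hard-offline: poison the page as if a memory error had been reported for it
    echo 0x2fd8000 > /sys/devices/system/memory/hard_offline_page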
/linux-4.4.14/Documentation/cgroups/
Dmemory.txt9 memory controller in this document. Do not confuse memory controller
10 used here with the memory controller that is used in hardware.
14 When we mention a cgroup (cgroupfs's directory) with memory controller,
15 we call it "memory cgroup". When you see git-log and source code, you'll
19 Benefits and Purpose of the memory controller
21 The memory controller isolates the memory behaviour of a group of tasks
23 uses of the memory controller. The memory controller can be used to
27 amount of memory.
28 b. Create a cgroup with a limited amount of memory; this can be used
30 c. Virtualization solutions can control the amount of memory they want
[all …]
Dmemcg_test.txt9 (*) Topics on API should be in Documentation/cgroups/memory.txt)
15 Allocated at boot or memory hotplug. Freed at memory hot removal.
172 memory hotplug test is one of good test.
173 to offline memory, do following.
174 # echo offline > /sys/devices/system/memory/memoryXXX/state
175 (XXX is the place of memory)
182 echo 1 >/opt/cgroup/01/memory/use_hierarchy
202 # mount -t cgroup none /cgroup -o cpuset,memory,cpu,devices
213 # mount -t cgroup none /cgroup -o memory
215 # echo 40M > /cgroup/test/memory.limit_in_bytes
[all …]
Dcpusets.txt22 1.6 What is memory spread ?
42 an on-line node that contains memory.
55 set_mempolicy(2) system calls to include Memory Nodes in its memory
73 complex memory cache hierarchies and multiple Memory Nodes having
75 the efficient scheduling and memory placement of processes.
82 memory placement to reduce memory access times and contention,
98 when the memory locations are changed.
104 memory allocator code.
115 Nodes it may obtain memory (mbind, set_mempolicy).
149 - in page_alloc.c, to restrict memory to allowed nodes.
[all …]
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/core/
Dmemory.c29 struct nvkm_memory *memory) in nvkm_memory_ctor() argument
31 memory->func = func; in nvkm_memory_ctor()
37 struct nvkm_memory *memory = *pmemory; in nvkm_memory_del() local
38 if (memory && !WARN_ON(!memory->func)) { in nvkm_memory_del()
39 if (memory->func->dtor) in nvkm_memory_del()
40 *pmemory = memory->func->dtor(memory); in nvkm_memory_del()
52 struct nvkm_memory *memory; in nvkm_memory_new() local
58 ret = nvkm_instobj_new(imem, size, align, zero, &memory); in nvkm_memory_new()
62 *pmemory = memory; in nvkm_memory_new()
Dgpuobj.c48 return nvkm_ro32(gpuobj->memory, offset); in nvkm_gpuobj_heap_rd32()
54 nvkm_wo32(gpuobj->memory, offset, data); in nvkm_gpuobj_heap_wr32()
62 nvkm_done(gpuobj->memory); in nvkm_gpuobj_heap_release()
82 gpuobj->map = nvkm_kmap(gpuobj->memory); in nvkm_gpuobj_heap_acquire()
179 abs(align), zero, &gpuobj->memory); in nvkm_gpuobj_ctor()
184 gpuobj->addr = nvkm_memory_addr(gpuobj->memory); in nvkm_gpuobj_ctor()
185 gpuobj->size = nvkm_memory_size(gpuobj->memory); in nvkm_gpuobj_ctor()
199 nvkm_memory_del(&gpuobj->memory); in nvkm_gpuobj_del()
225 struct nvkm_memory *memory = gpuobj->memory; in nvkm_gpuobj_map() local
228 nvkm_memory_map(memory, vma, 0); in nvkm_gpuobj_map()
[all …]
/linux-4.4.14/Documentation/devicetree/bindings/reserved-memory/
Dreserved-memory.txt1 *** Reserved memory regions ***
3 Reserved memory is specified as a node under the /reserved-memory node.
4 The operating system shall exclude reserved memory from normal usage
6 normal use) memory regions. Such memory regions are usually designed for
9 Parameters for each memory region can be encoded into the device tree
12 /reserved-memory node
19 /reserved-memory/ child nodes
21 Each child of the reserved-memory node specifies one or more regions of
22 reserved memory. Each child node may either use a 'reg' property to
23 specify a specific range of reserved memory, or a 'size' property with
[all …]
/linux-4.4.14/drivers/staging/octeon/
Dethernet-mem.c55 char *memory; in cvm_oct_free_hw_skbuff() local
58 memory = cvmx_fpa_alloc(pool); in cvm_oct_free_hw_skbuff()
59 if (memory) { in cvm_oct_free_hw_skbuff()
61 *(struct sk_buff **)(memory - sizeof(void *)); in cvm_oct_free_hw_skbuff()
65 } while (memory); in cvm_oct_free_hw_skbuff()
85 char *memory; in cvm_oct_fill_hw_memory() local
100 memory = kmalloc(size + 256, GFP_ATOMIC); in cvm_oct_fill_hw_memory()
101 if (unlikely(memory == NULL)) { in cvm_oct_fill_hw_memory()
106 fpa = (char *)(((unsigned long)memory + 256) & ~0x7fUL); in cvm_oct_fill_hw_memory()
107 *((char **)fpa - 1) = memory; in cvm_oct_fill_hw_memory()
[all …]
/linux-4.4.14/Documentation/vm/
Dnuma10 or more CPUs, local memory, and/or IO buses. For brevity and to
24 Coherent NUMA or ccNUMA systems. With ccNUMA systems, all memory is visible
28 Memory access time and effective memory bandwidth varies depending on how far
29 away the cell containing the CPU or IO bus making the memory access is from the
30 cell containing the target memory. For example, access to memory by CPUs
32 bandwidths than accesses to memory on other, remote cells. NUMA platforms
37 memory bandwidth. However, to achieve scalable memory bandwidth, system and
38 application software must arrange for a large majority of the memory references
39 [cache misses] to be to "local" memory--memory on the same cell, if any--or
40 to the closest cell with memory.
[all …]
Dfrontswap.txt1 Frontswap provides a "transcendent memory" interface for swap pages.
6 and the only necessary changes to the core kernel for transcendent memory;
8 See the LWN.net article "Transcendent memory in a nutshell" for a detailed
15 to the requirements of transcendent memory (such as Xen's "tmem", or
16 in-kernel compressed memory, aka "zcache", or future RAM-like devices);
25 copy the page to transcendent memory and associate it with the type and
27 from transcendent memory into kernel memory, but will NOT remove the page
28 from transcendent memory. An "invalidate_page" will remove the page
29 from transcendent memory and an "invalidate_area" will remove ALL pages
36 success, the data has been successfully saved to transcendent memory and
[all …]
Dpage_owner.txt7 It can be used to debug memory leak or to find a memory hogger.
18 possibility rather than just keeping it in memory, so bad for debugging.
29 doesn't require memory to store owner information, so there is no runtime
30 memory overhead. And, page owner inserts just two unlikely branches into
49 kernel memory problem.
52 stores information into the memory from struct page extension. This memory
54 memory system, so, until initialization, many pages can be allocated and
59 more accurately. On 2GB memory x86-64 VM box, 13343 early allocated pages
Dnuma_memory_policy.txt4 In the Linux kernel, "memory policy" determines from which node the kernel will
5 allocate memory in a NUMA system or in an emulated NUMA system. Linux has
7 The current memory policy support was added to Linux 2.6 around May 2004. This
8 document attempts to describe the concepts and APIs of the 2.6 memory policy
14 memory may be allocated by a set of processes. Memory policies are a
23 The Linux kernel supports _scopes_ of memory policy, described here from
32 with "sufficient" memory, so as not to overload the initial boot node
46 executable image that has no awareness of memory policy. See the
93 memory area into 2 or 3 VMAs, each with it's own policy.
102 Shared Policy: Conceptually, shared policies apply to "memory objects"
[all …]
Dpagemap.txt37 determine which areas of memory are actually mapped and llseek to
76 memory cgroup each page is charged to, indexed by PFN. Only available when
85 page is managed by the SLAB/SLOB/SLUB/SLQB kernel memory allocator
90 a free memory block managed by the buddy system allocator
91 The buddy system organizes free memory in blocks of various orders.
101 memory allocators and various device drivers. However in this interface,
107 hardware detected memory corruption on this page: don't touch the data!
113 identical memory pages dynamically shared between one or more processes
133 ie. for file backed page: (in-memory data revision >= on-disk one)
135 ie. for file backed page: (in-memory data revision > on-disk one)
[all …]
Didle_page_tracking.txt3 The idle page tracking feature allows to track which memory pages are being
6 account when configuring the workload parameters, setting memory cgroup limits,
16 The file implements a bitmap where each bit corresponds to a memory page. The
27 Only accesses to user memory pages are tracked. These are pages mapped to a
47 placed in a memory cgroup.
60 The kernel internally keeps track of accesses to user memory pages in order to
61 reclaim unreferenced pages first on memory shortage conditions. A page is
76 When a dirty page is written to swap or disk as a result of memory reclaim or
77 exceeding the dirty memory limit, it is not marked referenced.
79 The idle memory tracking feature adds a new page flag, the Idle flag. This flag
[all …]
Dhighmem.txt10 (*) What is high memory?
25 High memory (highmem) is used when the size of physical memory approaches or
26 exceeds the maximum size of virtual memory. At that point it becomes
27 impossible for the kernel to keep all of the available physical memory mapped
29 the pieces of physical memory that it wants to access.
31 The part of (physical) memory not covered by a permanent mapping is what we
37 kernel entry/exit. This means the available virtual memory space (4GiB on
51 This means that the kernel can at most map 1GiB of physical memory at any one
53 temporary maps to access the rest of the physical memory - the actual direct
92 wants to access the contents of a page that might be allocated from high memory
[all …]
Dtranshuge.txt5 Performance critical computing applications dealing with large memory
8 using huge pages for the backing of virtual memory with huge pages
12 Currently it only works for anonymous memory mappings but in the
22 only matters the first time the memory is accessed for the lifetime of
23 a memory mapping. The second long lasting and much more important
24 factor will affect all subsequent accesses to the memory for the whole
29 mapping a much larger amount of virtual memory in turn reducing the
43 - if a hugepage allocation fails because of memory fragmentation,
49 immediately in the buddy or through the VM), guest physical memory
53 - it doesn't require memory reservation and in turn it uses hugepages
[all …]
Dovercommit-accounting7 allocate slightly more memory in this mode. This is the
12 and just relying on the virtual memory consisting almost
20 pages but will receive errors on memory allocation as
24 memory allocations will be available in the future
62 shmfs memory drawn from the same pool
67 o We account mmap memory mappings
Dbalance17 is, only when needed (aka zone free memory is 0), instead of making it
25 regular memory requests by allocating one from the dma pool, instead
28 In 2.2, memory balancing/page reclamation would kick off only when the
29 _total_ number of free pages fell below 1/64 th of total memory. With the
30 right ratio of dma and regular memory, it is quite possible that balancing
32 been running production machines of varying memory sizes, and seems to be
45 Another possible solution is that we balance only when the free memory
47 total memory in the zone and its lower class zones. This fixes the 2.2
60 problems: first, kswapd is woken up as in 2.2 on low memory conditions
76 Page stealing from process memory and shm is done if stealing the page would
[all …]
Dhwpoison.txt3 Upcoming Intel CPUs have support for recovering from some memory errors
12 * hardware as being corrupted usually due to a 2bit ECC memory or cache
24 * users, because memory failures could happen anytime and anywhere,
36 The code consists of a the high level handler in mm/memory-failure.c,
46 memory failures too. The expection is that near all applications
51 There are two (actually three) modi memory failure recovery can be in:
54 All memory failures cause a panic. Do not attempt recovery.
61 This allows applications who can process memory errors in a gentle
67 This is best for memory error unaware applications and default
119 memory failures.
[all …]
Dsoft-dirty.txt27 This is so, since the pages are still mapped to physical memory, and thus all
31 While in most cases tracking memory changes by #PF-s is more than enough
33 unmaps a previously mapped memory region and then maps a new one at exactly
36 memory region renewal the kernel always marks new memory regions (and
Duserfaultfd.txt7 memory page faults, something otherwise only the kernel code could do.
17 memory ranges) provides two primary functionalities:
22 2) various UFFDIO_* ioctls that can manage the virtual memory regions
25 memory in the background
27 The real advantage of userfaults if compared to regular virtual memory
59 register a memory range in the userfaultfd by setting the
66 supported for all memory types depending on the underlying virtual
67 memory backend (anonymous memory vs tmpfs vs real filebacked
72 memory from the userfaultfd registered range). This means a userfault
87 migration. Postcopy live migration is one form of memory
[all …]
Dhugetlbpage.txt14 system call or standard SYSV shared memory system calls (shmget, shmat).
62 memory pressure.
66 or shared memory system calls to use the huge pages. See the discussion of
72 allocating huge pages as memory has not yet become fragmented.
91 over all the set of allowed nodes specified by the NUMA memory policy of the
93 task has default memory policy--is all on-line nodes with memory. Allowed
94 nodes with insufficient available, contiguous memory for a huge page will be
96 below of the interaction of task memory policy, cpusets and per node attributes
100 physically contiguous memory that is present in system at the time of the
104 memory, if any.
[all …]
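The persistent huge page pool discussed above is sized through the standard vm.nr_hugepages sysctl (not itself shown in the hits); a minimal sketch with an illustrative count:

    echo 20 > /proc/sys/vm/nr_hugepages   # ask for 20 huge pages of the default size
    grep -i hugepages /proc/meminfo       # check how many could actually be allocated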
Dcleancache.txt9 to keep around, but can't since there isn't enough memory. So when the
11 put the data contained in that page into "transcendent memory", memory
20 Transcendent memory "drivers" for cleancache are currently implemented
21 in Xen (using hypervisor memory) and zcache (using in-kernel compressed
22 memory) and other implementations are in development.
28 A cleancache "backend" that provides transcendent memory registers itself
46 A "get_page" will copy the page, if found, from cleancache into kernel memory.
103 saved in transcendent memory (RAM that is otherwise not directly
108 this transcendent memory (aka "tmem"), which conceptually lies between
115 faster-than-disk transcendent memory, and the cleancache (and frontswap)
[all …]
Dksm.txt4 KSM is a memory-saving de-duplication feature, enabled by CONFIG_KSM=y,
8 The KSM daemon ksmd periodically scans those areas of user memory which
14 Kernel Shared Memory), to fit more virtual machines into physical memory,
19 KSM's merged pages were originally locked into kernel memory, but can now
30 more memory than is available - possibly failing with EAGAIN, but more
44 and might fail with EAGAIN if not enough memory for internal structures.
63 reside in the memory area of same NUMA node. That brings
67 need to minimize memory usage, are likely to benefit from
D00-INDEX6 - various information on memory balancing.
10 - Outline frontswap, part of the transcendent memory frontend.
24 - documentation of concepts and APIs of the 2.6 memory policy support.
/linux-4.4.14/Documentation/devicetree/bindings/powerpc/fsl/
Dmem-ctrlr.txt1 Freescale DDR memory controller
5 - compatible : Should include "fsl,chip-memory-controller" where
7 "fsl,qoriq-memory-controller".
13 memory-controller@2000 {
14 compatible = "fsl,bsc9132-memory-controller";
22 ddr1: memory-controller@8000 {
23 compatible = "fsl,qoriq-memory-controller-v4.7",
24 "fsl,qoriq-memory-controller";
/linux-4.4.14/arch/powerpc/mm/
Dnuma.c187 static const __be32 *of_get_usable_memory(struct device_node *memory) in of_get_usable_memory() argument
191 prop = of_get_property(memory, "linux,drconf-usable-memory", &len); in of_get_usable_memory()
368 struct device_node *memory = NULL; in get_n_mem_cells() local
370 memory = of_find_node_by_type(memory, "memory"); in get_n_mem_cells()
371 if (!memory) in get_n_mem_cells()
374 *n_addr_cells = of_n_addr_cells(memory); in get_n_mem_cells()
375 *n_size_cells = of_n_size_cells(memory); in get_n_mem_cells()
376 of_node_put(memory); in get_n_mem_cells()
416 static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm) in of_get_drconf_memory() argument
421 prop = of_get_property(memory, "ibm,dynamic-memory", &len); in of_get_drconf_memory()
[all …]
/linux-4.4.14/Documentation/powerpc/
Dfirmware-assisted-dump.txt13 - Fadump uses the same firmware interfaces and memory reservation model
15 - Unlike phyp dump, fadump exports the memory dump through /proc/vmcore
20 - Unlike phyp dump, fadump allows user to release all the memory reserved
34 -- Once the dump is copied out, the memory that held the dump
43 -- The first kernel registers the sections of memory with the
45 These registered sections of memory are reserved by the first
49 the low memory (boot memory of size larger of 5% of system RAM
53 NOTE: The term 'boot memory' means size of the low memory chunk
55 booted with restricted memory. By default, the boot memory
57 Alternatively, user can also specify boot memory size
[all …]
/linux-4.4.14/drivers/gpu/drm/
Ddrm_agpsupport.c200 struct agp_memory *memory; in drm_agp_alloc() local
211 if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) { in drm_agp_alloc()
216 entry->handle = (unsigned long)memory->key + 1; in drm_agp_alloc()
217 entry->memory = memory; in drm_agp_alloc()
220 list_add(&entry->head, &dev->agp->memory); in drm_agp_alloc()
223 request->physical = memory->physical; in drm_agp_alloc()
252 list_for_each_entry(entry, &dev->agp->memory, head) { in drm_agp_lookup_entry()
282 ret = drm_unbind_agp(entry->memory); in drm_agp_unbind()
324 if ((retcode = drm_bind_agp(entry->memory, page))) in drm_agp_bind()
365 drm_unbind_agp(entry->memory); in drm_agp_free()
[all …]
Ddrm_memory.c69 list_for_each_entry(agpmem, &dev->agp->memory, head) in agp_remap()
74 if (&agpmem->head == &dev->agp->memory) in agp_remap()
87 phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE); in agp_remap()
/linux-4.4.14/drivers/char/agp/
Dcompat_ioctl.c150 struct agp_memory *memory; in compat_agpioc_allocate_wrap() local
157 memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type); in compat_agpioc_allocate_wrap()
159 if (memory == NULL) in compat_agpioc_allocate_wrap()
162 alloc.key = memory->key; in compat_agpioc_allocate_wrap()
163 alloc.physical = memory->physical; in compat_agpioc_allocate_wrap()
166 agp_free_memory_wrap(memory); in compat_agpioc_allocate_wrap()
175 struct agp_memory *memory; in compat_agpioc_bind_wrap() local
181 memory = agp_find_mem_by_key(bind_info.key); in compat_agpioc_bind_wrap()
183 if (memory == NULL) in compat_agpioc_bind_wrap()
186 return agp_bind_memory(memory, bind_info.pg_start); in compat_agpioc_bind_wrap()
[all …]
Dfrontend.c270 void agp_free_memory_wrap(struct agp_memory *memory) in agp_free_memory_wrap() argument
272 agp_remove_from_pool(memory); in agp_free_memory_wrap()
273 agp_free_memory(memory); in agp_free_memory_wrap()
278 struct agp_memory *memory; in agp_allocate_memory_wrap() local
280 memory = agp_allocate_memory(agp_bridge, pg_count, type); in agp_allocate_memory_wrap()
281 if (memory == NULL) in agp_allocate_memory_wrap()
284 agp_insert_into_pool(memory); in agp_allocate_memory_wrap()
285 return memory; in agp_allocate_memory_wrap()
360 struct agp_memory *memory; in agp_remove_all_memory() local
363 memory = controller->pool; in agp_remove_all_memory()
[all …]
Dbackend.c115 long memory, index, result; in agp_find_max() local
118 memory = totalram_pages >> (20 - PAGE_SHIFT); in agp_find_max()
120 memory = totalram_pages << (PAGE_SHIFT - 20); in agp_find_max()
124 while ((memory > maxes_table[index].mem) && (index < 8)) in agp_find_max()
128 ( (memory - maxes_table[index - 1].mem) * in agp_find_max()
/linux-4.4.14/Documentation/devicetree/bindings/soc/qcom/
Dqcom,smem.txt12 - memory-region:
15 Definition: handle to memory reservation for main SMEM memory region.
20 Definition: handle to RPM message memory resource
26 the shared memory
32 reserved-memory {
46 memory-region = <&smem_region>;
53 rpm_msg_ram: memory@fc428000 {
/linux-4.4.14/mm/
DKconfig17 Linux manages its memory internally. Most users will
22 memory hotplug may have different options here.
24 but is incompatible with memory hotplug and may suffer
36 memory systems, over FLATMEM. These systems have holes
52 memory hotplug systems. This is normal.
82 # to represent different areas of memory. This variable allows
153 bool "Enable to assign a node which has only movable memory"
160 Allow a node to have only movable memory. Pages used by the kernel,
162 memory device cannot be hotplugged. This option allows the following
164 - When the system is booting, node full of hotpluggable memory can
[all …]
DKconfig.debug6 field for every page. This extension enables us to save memory
7 by not allocating this extra memory according to boottime
11 bool "Debug page memory allocations"
20 of memory corruption.
26 that would result in incorrect warnings of memory corruption after
Dmemblock.c35 .memory.regions = memblock_memory_init_regions,
36 .memory.cnt = 1, /* empty dummy entry */
37 .memory.max = INIT_MEMBLOCK_REGIONS,
71 if (type == &memblock.memory) in memblock_type_name()
320 if (memblock.memory.regions == memblock_memory_init_regions) in get_allocated_memblock_memory_regions_info()
323 *addr = __pa(memblock.memory.regions); in get_allocated_memblock_memory_regions_info()
326 memblock.memory.max); in get_allocated_memblock_memory_regions_info()
373 if (type == &memblock.memory) in memblock_double_array()
610 return memblock_add_range(&memblock.memory, base, size, nid, 0); in memblock_add_node()
618 struct memblock_type *type = &memblock.memory; in memblock_add_region()
[all …]
/linux-4.4.14/Documentation/devicetree/bindings/ata/
Dapm-xgene.txt9 - reg : First memory resource shall be the AHCI memory
11 Second memory resource shall be the host controller
12 core memory resource.
13 Third memory resource shall be the host controller
14 diagnostic memory resource.
15 4th memory resource shall be the host controller
16 AXI memory resource.
17 5th optional memory resource shall be the host
18 controller MUX memory resource if required.
/linux-4.4.14/drivers/nvdimm/
DKconfig6 Generic support for non-volatile memory devices including
9 bus is registered to advertise PMEM (persistent memory)
12 memory resource that may span multiple DIMMs and support DAX
15 mode to non-volatile memory.
20 tristate "PMEM: Persistent memory block device support"
28 non-standard OEM-specific E820 memory type (type-12, see
32 these persistent memory ranges into block devices that are
44 access capability. BLK-mode access uses memory-mapped-i/o
63 update semantics for persistent memory devices, so that
77 bool "PFN: Map persistent (device) memory"
[all …]
/linux-4.4.14/drivers/xen/
DKconfig5 bool "Xen memory balloon driver"
8 The balloon driver allows the Xen domain to request more memory from
9 the system to expand the domain's memory allocation, or alternatively
10 return unneeded memory to the system.
13 bool "Dynamically self-balloon kernel memory to target"
17 Self-ballooning dynamically balloons available kernel memory driven
18 by the current usage of anonymous memory ("committed AS") and
33 Memory hotplug support for Xen balloon driver allows expanding memory
41 where <maxmem> is >= requested memory size,
43 2) dom0: xl mem-set <domU> <memory>
[all …]
/linux-4.4.14/Documentation/devicetree/bindings/memory-controllers/
Drenesas-memory-controllers.txt1 DT bindings for Renesas R-Mobile and SH-Mobile memory controllers
4 Renesas R-Mobile and SH-Mobile SoCs contain one or more memory controllers.
5 These memory controllers differ from one SoC variant to another, and are called
9 Currently memory controller device nodes are used only to reference PM
21 - reg: Must contain the base address and length of the memory controller's
25 - interrupts: Must contain a list of interrupt specifiers for memory
32 - power-domains: Must contain a reference to the PM domain that the memory
37 sbsc1: memory-controller@fe400000 {
Darm,pl172.txt17 - ranges: Must contain one or more chip select memory regions.
28 Child chip-select (cs) nodes contain the memory devices nodes connected to
38 memory layout.
45 which chipselect is used for accessing the memory.
47 - mpmc,memory-width: Width of the chip select memory. Must be equal to
82 - mpmc,turn-round-delay: Delay between access to memory banks in nano
90 emc: memory-controller@40005000 {
108 mpmc,memory-width = <16>;
Dti-aemif.txt4 provide a glue-less interface to a variety of asynchronous memory devices like
5 ASRA M, NOR and NAND memory. A total of 256M bytes of any of these memories
32 - ranges: Contains memory regions. There are two types of
35 set up to reflect the memory layout for 4 chipselects,
56 Child chip-select (cs) nodes contain the memory devices nodes connected to
67 memory layout.
74 which chipselect is used for accessing the memory. For
96 Time between the end of one asynchronous memory
98 memory access. This delay is not incurred
103 Time between the beginning of a memory cycle
[all …]
Dcalxeda-ddr-ctrlr.txt1 Calxeda DDR memory controller
12 memory-controller@fff00000 {
/linux-4.4.14/Documentation/sysctl/
Dvm.txt13 of the virtual memory (VM) subsystem of the Linux kernel and
67 The amount of free memory in the system that should be reserved for users
90 Changing this takes effect whenever an application requests memory.
104 all zones are compacted such that free memory is available in contiguous
106 huge pages although processes will also directly compact memory as required.
115 acceptable trade for large contiguous free memory. Set to 0 to prevent
122 Contains the amount of dirty memory at which the background kernel
127 immediately taken into account to evaluate the dirty memory limits and the
134 Contains, as a percentage of total available memory that contains free pages
138 The total avaiable memory is not equal to total system memory.
[all …]
/linux-4.4.14/Documentation/x86/x86_64/
Dmm.txt4 Virtual memory map with 4 level page tables:
9 ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory
13 ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
15 ffffec0000000000 - fffffc0000000000 (=44 bits) kasan shadow memory (16TB)
24 The direct mapping covers all memory in the system up to the highest
25 memory address (this means in some cases it can also include PCI memory
Dfake-numa-for-cpusets5 in conjunction with cpusets for coarse memory management. Using this feature,
6 you can create fake NUMA nodes that represent contiguous chunks of memory and
8 amount of system memory that are available to a certain class of tasks.
17 emulation setup of "numa=fake=4*512,". This will split our system memory into
36 Documentation/cgroups/cpusets.txt, you can assign fake nodes (i.e. contiguous memory
47 memory allocations (1G).
49 You can now assign tasks to these cpusets to limit the memory resources
56 Notice the difference between the system memory usage as reported by
64 This allows for coarse memory management for the tasks you assign to particular
67 memory management needs.
/linux-4.4.14/arch/powerpc/boot/dts/fsl/
Dqoriq-sec5.3-0.dtsi86 compatible = "fsl,sec-v5.3-rtic-memory",
87 "fsl,sec-v5.0-rtic-memory",
88 "fsl,sec-v4.0-rtic-memory";
93 compatible = "fsl,sec-v5.3-rtic-memory",
94 "fsl,sec-v5.0-rtic-memory",
95 "fsl,sec-v4.0-rtic-memory";
100 compatible = "fsl,sec-v5.3-rtic-memory",
101 "fsl,sec-v5.0-rtic-memory",
102 "fsl,sec-v4.0-rtic-memory";
107 compatible = "fsl,sec-v5.3-rtic-memory",
[all …]
Dqoriq-sec5.2-0.dtsi86 compatible = "fsl,sec-v5.2-rtic-memory",
87 "fsl,sec-v5.0-rtic-memory",
88 "fsl,sec-v4.0-rtic-memory";
93 compatible = "fsl,sec-v5.2-rtic-memory",
94 "fsl,sec-v5.0-rtic-memory",
95 "fsl,sec-v4.0-rtic-memory";
100 compatible = "fsl,sec-v5.2-rtic-memory",
101 "fsl,sec-v5.0-rtic-memory",
102 "fsl,sec-v4.0-rtic-memory";
107 compatible = "fsl,sec-v5.2-rtic-memory",
[all …]
Dqoriq-sec5.0-0.dtsi81 compatible = "fsl,sec-v5.0-rtic-memory",
82 "fsl,sec-v4.0-rtic-memory";
87 compatible = "fsl,sec-v5.0-rtic-memory",
88 "fsl,sec-v4.0-rtic-memory";
93 compatible = "fsl,sec-v5.0-rtic-memory",
94 "fsl,sec-v4.0-rtic-memory";
99 compatible = "fsl,sec-v5.0-rtic-memory",
100 "fsl,sec-v4.0-rtic-memory";
Dqoriq-sec4.2-0.dtsi81 compatible = "fsl,sec-v4.2-rtic-memory",
82 "fsl,sec-v4.0-rtic-memory";
87 compatible = "fsl,sec-v4.2-rtic-memory",
88 "fsl,sec-v4.0-rtic-memory";
93 compatible = "fsl,sec-v4.2-rtic-memory",
94 "fsl,sec-v4.0-rtic-memory";
99 compatible = "fsl,sec-v4.2-rtic-memory",
100 "fsl,sec-v4.0-rtic-memory";
Dp1023si-post.dtsi181 memory-controller@2000 {
182 compatible = "fsl,p1023-memory-controller";
258 compatible = "fsl,sec-v4.2-rtic-memory",
259 "fsl,sec-v4.0-rtic-memory";
264 compatible = "fsl,sec-v4.2-rtic-memory",
265 "fsl,sec-v4.0-rtic-memory";
270 compatible = "fsl,sec-v4.2-rtic-memory",
271 "fsl,sec-v4.0-rtic-memory";
276 compatible = "fsl,sec-v4.2-rtic-memory",
277 "fsl,sec-v4.0-rtic-memory";
[all …]
Dp1020rdb-pc_camp_core0.dts6 * This dts file allows core0 to have memory, l2, i2c, spi, gpio, tdm, dma, usb,
39 memory {
40 device_type = "memory";
/linux-4.4.14/drivers/media/platform/exynos4-is/
Dfimc-is.c241 buf = is->memory.vaddr + is->setfile.base; in fimc_is_load_setfile()
246 pr_debug("mem vaddr: %p, setfile buf: %p\n", is->memory.vaddr, buf); in fimc_is_load_setfile()
271 mcuctl_write(is->memory.paddr, is, MCUCTL_REG_BBOAR); in fimc_is_cpu_set_power()
317 memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size); in fimc_is_start_firmware()
337 is->memory.vaddr = dma_alloc_coherent(dev, FIMC_IS_CPU_MEM_SIZE, in fimc_is_alloc_cpu_memory()
338 &is->memory.paddr, GFP_KERNEL); in fimc_is_alloc_cpu_memory()
339 if (is->memory.vaddr == NULL) in fimc_is_alloc_cpu_memory()
342 is->memory.size = FIMC_IS_CPU_MEM_SIZE; in fimc_is_alloc_cpu_memory()
343 memset(is->memory.vaddr, 0, is->memory.size); in fimc_is_alloc_cpu_memory()
345 dev_info(dev, "FIMC-IS CPU memory base: %#x\n", (u32)is->memory.paddr); in fimc_is_alloc_cpu_memory()
[all …]
/linux-4.4.14/arch/microblaze/
DKconfig143 aspects of kernel memory management.
155 This is needed to be able to allocate uncachable memory regions.
156 The feature requires the design to define the RAM memory controller
157 window to be twice as large as the actual physical memory.
160 bool "High memory support"
165 space as well as some memory mapped IO. That means that, if you
166 have a large amount of physical memory and/or IO, not all of the
167 memory can be "permanently mapped" by the kernel. The physical
168 memory that is not permanently mapped is called "high memory".
173 bool "Set maximum low memory"
[all …]
/linux-4.4.14/Documentation/devicetree/bindings/soc/fsl/
Dbman.txt67 BMan requires a contiguous range of physical memory used for the backing store
68 for BMan Free Buffer Proxy Records (FBPR). This memory is reserved/allocated as a
69 node under the /reserved-memory node
71 The BMan FBPR memory node must be named "bman-fbpr"
80 The following constraints are relevant to the FBPR private memory:
83 - The alignment must be a muliptle of the memory size
91 For additional details about reserved memory regions see reserved-memory.txt
95 The example below shows a BMan FBPR dynamic allocation memory node
97 reserved-memory {
128 memory-region = <&bman_fbpr>;
Dqman.txt75 QMan requires two contiguous range of physical memory used for the backing store
77 This memory is reserved/allocated as a nodes under the /reserved-memory node
79 The QMan FQD memory node must be named "qman-fqd"
88 The QMan PFDR memory node must be named "qman-pfdr"
97 The following constraints are relevant to the FQD and PFDR private memory:
100 - The alignment must be a muliptle of the memory size
108 For additional details about reserved memory regions see reserved-memory.txt
112 The example below shows a QMan FQD and a PFDR dynamic allocation memory nodes
114 reserved-memory {
167 memory-region = <&qman_fqd &qman_pfdr>;
/linux-4.4.14/lib/
DKconfig.kasan7 bool "KASan: runtime memory debugger"
11 Enables kernel address sanitizer - runtime memory debugger,
16 This feature consumes about 1/8 of available memory and brings about
28 Before every memory access compiler insert function call
30 of shadow memory. This is slower than inline instrumentation,
37 Compiler directly inserts code checking shadow memory before
38 memory accesses. This is faster than outline (in some workloads
DKconfig.kmemcheck7 bool "kmemcheck: trap use of uninitialized memory"
17 This option enables tracing of dynamically allocated kernel memory
18 to see if memory is used before it has been given an initial value.
19 Be aware that this requires half of your memory for bookkeeping and
20 will insert extra code at *every* read and write to tracked memory
25 kmemcheck=0, the large memory and CPU overhead is not incurred.
76 bool "kmemcheck: allow partially uninitialized memory"
/linux-4.4.14/Documentation/video4linux/cx2341x/
Dfw-memory.txt1 This document describes the cx2341x memory map and documents some of the register
4 Note: the memory long words are little-endian ('intel format').
6 Warning! This information was figured out from searching through the memory and
8 was not derived from anything more than searching through the memory space with
19 The cx2341x exposes its entire 64M memory space to the PCI host via the PCI BAR0
23 0x00000000-0x00ffffff Encoder memory space
30 0x01000000-0x01ffffff Decoder memory space
69 0x84 - first write linked list reg, for pci memory addr
70 0x88 - first write linked list reg, for length of buffer in memory addr
75 0xe0 - first (and only) read linked list reg, for pci memory addr
[all …]
Dfw-dma.txt9 memory without requiring help from a CPU. Like most hardware, it must operate
10 on contiguous physical memory. This is difficult to come by in large quantities
11 on virtual memory machines.
52 Results[1]: Offset: The position relative to the card's memory space.
62 The scatter-gather array is a contiguously allocated block of memory that
65 addresses are the physical memory location of the target DMA buffer.
/linux-4.4.14/arch/powerpc/boot/
Dtreeboot-iss4xx.c41 void *memory; in iss_4xx_fixups() local
44 memory = finddevice("/memory"); in iss_4xx_fixups()
45 if (!memory) in iss_4xx_fixups()
48 getprop(memory, "reg", reg, sizeof(reg)); in iss_4xx_fixups()
Doflib.c115 static ihandle memory; variable
144 memory = of_call_prom("open", 1, 1, "/memory"); in check_of_version()
145 if (memory == PROM_ERROR) { in check_of_version()
146 memory = of_call_prom("open", 1, 1, "/memory@0"); in check_of_version()
147 if (memory == PROM_ERROR) { in check_of_version()
167 ret = of_call_prom_ret("call-method", 5, 2, &result, "claim", memory, in of_claim()
Ddevtree.c23 void *root, *memory; in dt_fixup_memory() local
46 memory = finddevice("/memory"); in dt_fixup_memory()
47 if (! memory) { in dt_fixup_memory()
48 memory = create_node(NULL, "memory"); in dt_fixup_memory()
49 setprop_str(memory, "device_type", "memory"); in dt_fixup_memory()
57 setprop(memory, "reg", memreg, (naddr + nsize)*sizeof(u32)); in dt_fixup_memory()
/linux-4.4.14/Documentation/ia64/
Daliasing.txt10 Itanium supports several attributes for virtual memory references.
19 System memory typically uses the WB attribute. The UC attribute is
20 used for memory-mapped I/O devices. The WC attribute is uncacheable
29 support either WB or UC access to main memory, while others support
34 Platform firmware describes the physical memory map and the
36 the EFI GetMemoryMap() interface. ACPI can also describe memory
47 memory Linux is actually using and the attribute for each region.
48 This contains only system memory; it does not contain MMIO space.
51 memory described by the efi_memmap. Linux/ia64 can't use all memory
81 homogeneous with respect to memory attributes.
[all …]
/linux-4.4.14/arch/arm/boot/dts/
Decx-2000.dts66 memory@0 {
67 name = "memory";
68 device_type = "memory";
72 memory@200000000 {
73 name = "memory";
74 device_type = "memory";
88 memory-controller@fff00000 {
Datlas7-evb.dts24 memory {
25 device_type = "memory";
29 reserved-memory {
35 compatible = "sirf,reserved-memory";
49 memory-region = <&nanddisk_reserved>;
82 memory-region = <&vpp_reserved>;
Dskeleton.dtsi3 * add a compatible value. The bootloader will typically populate the memory
12 memory { device_type = "memory"; reg = <0 0>; };
Dskeleton64.dtsi4 * bootloader will typically populate the memory node.
12 memory { device_type = "memory"; reg = <0 0 0 0>; };
Dhip04-d01.dts15 /* memory bus is 64-bit */
21 memory@00000000,10000000 {
22 device_type = "memory";
Dkirkwood-lsxhl.dts9 memory {
10 device_type = "memory";
Dkirkwood-lschlv2.dts9 memory {
10 device_type = "memory";
Dkirkwood-km_fixedeth.dts11 memory {
12 device_type = "memory";
Dxenvm-4.2.dts47 memory@80000000 {
48 device_type = "memory";
79 arm,v2m-memory-map = "rs1";
Dkirkwood-km_kirkwood.dts11 memory {
12 device_type = "memory";
Dqcom-msm8974-sony-xperia-honami.dts17 memory@0 {
19 device_type = "memory";
Dsocfpga_cyclone5_socrates.dts28 memory {
29 name = "memory";
30 device_type = "memory";
/linux-4.4.14/Documentation/devicetree/bindings/display/
Darm,pl11x.txt29 - memory-region: phandle to a node describing memory (see
30 Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt)
32 may be located anywhere in the memory
34 - max-memory-bandwidth: maximum bandwidth in bytes per second that the
35 cell's memory interface can handle; if not present, the memory
78 max-memory-bandwidth = <94371840>; /* Bps, 1024x768@60 16bpp */
Dst,stih4xx.txt6 - reg: Physical base address of the IP registers and length of memory mapped region.
14 - reg: Physical base address of the IP registers and length of memory mapped region.
32 - reg: Physical base address of the IP registers and length of memory mapped region.
48 - reg: Physical base address of the IP registers and length of memory mapped region.
49 - reg-names: names of the mapped memory regions listed in regs property in
60 - reg: Physical base address of the IP registers and length of memory mapped region.
61 - reg-names: names of the mapped memory regions listed in regs property in
76 - reg: Physical base address of the IP registers and length of memory mapped region.
77 - reg-names: names of the mapped memory regions listed in regs property in
89 - reg: Physical base address of the IP registers and length of memory mapped region.
[all …]
/linux-4.4.14/Documentation/fault-injection/
Dnotifier-error-inject.txt59 bash: echo: write error: Cannot allocate memory
64 /sys/kernel/debug/notifier-error-inject/memory/actions/<notifier event>/error
66 Possible memory notifier events to be failed are:
71 Example: Inject memory hotplug offline error (-12 == -ENOMEM)
73 # cd /sys/kernel/debug/notifier-error-inject/memory
75 # echo offline > /sys/devices/system/memory/memoryXXX/state
76 bash: echo: write error: Cannot allocate memory
93 for CPU and memory notifiers.
96 * tools/testing/selftests/memory-hotplug/on-off-test.sh
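A minimal sketch of the full injection sequence the fragments above come from, assuming the MEM_GOING_OFFLINE action name listed by the documentation and a removable memoryXXX block (XXX is a placeholder for a real block number):

	# cd /sys/kernel/debug/notifier-error-inject/memory
	# echo -12 > actions/MEM_GOING_OFFLINE/error        # arm an -ENOMEM failure
	# echo offline > /sys/devices/system/memory/memoryXXX/state
	bash: echo: write error: Cannot allocate memory
	# echo 0 > actions/MEM_GOING_OFFLINE/error          # disarm the injection again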
/linux-4.4.14/drivers/staging/android/
DKconfig10 The ashmem subsystem is a new shared memory allocator, similar to
14 It is, in theory, a good memory allocator for low-memory devices,
15 because it can discard shared memory units when under memory pressure.
34 Registers processes to be killed when low memory conditions occur; this is useful
38 scripts (/init.rc), and it defines priority values with minimum free memory size
/linux-4.4.14/tools/perf/Documentation/
Dperf-mem.txt6 perf-mem - Profile memory accesses
15 "perf mem record" runs a command and gathers memory operation data
19 right set of options to display a memory access profile. By default, loads
22 Note that on Intel systems the memory latency reported is the use-latency,
24 queueing delays in addition to the memory subsystem latency.
33 Select the memory operation type: load or store (default: load,store)
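A hedged usage sketch of the commands the excerpt describes; ./myworkload is a placeholder for any command to be profiled:

	# perf mem record ./myworkload      # sample loads and stores while the command runs
	# perf mem report                   # browse the recorded memory access profile
	# perf mem -t load report           # restrict the report to load operations only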
/linux-4.4.14/Documentation/filesystems/
Dtmpfs.txt1 Tmpfs is a file system which keeps all files in virtual memory.
20 pages currently in memory will show up as cached. It will not show up
29 memory.
36 POSIX shared memory (shm_open, shm_unlink). Adding the following
44 This mount is _not_ needed for SYSV shared memory. The internal
47 shared memory)
62 since the OOM handler will not be able to free that memory.
77 use up all the memory on the machine; but enhances the scalability of
81 tmpfs has a mount option to set the NUMA memory allocation policy for
87 mpol=prefer:Node prefers to allocate memory from the given Node
[all …]
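A minimal sketch of a tmpfs mount using the size and NUMA policy options mentioned above; the 512m cap, node 0 and the /mnt/tmp mount point are arbitrary choices:

	# mount -t tmpfs -o size=512m,mpol=prefer:0 tmpfs /mnt/tmp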
Dproc.txt45 3.9 /proc/<pid>/map_files - Information about memory mapped files
141 statm Process memory status information
147 smaps an extension based on maps, showing the memory consumption of
149 numa_maps an extension based on maps, showing the memory locality and
200 memory usage. Its seven fields are explained in Table 1-3. The stat file
230 VmPeak peak virtual memory size
232 VmLck locked memory size
234 VmRSS size of memory portions
242 HugetlbPages size of hugetlb memory portions
257 Mems_allowed mask of memory nodes allowed to this process
[all …]
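A quick illustrative way to read the per-process memory counters named above (VmPeak, VmRSS, VmLck, HugetlbPages and the smaps breakdown); /proc/self is used only as a convenient example PID:

	# grep -E 'VmPeak|VmRSS|VmLck|HugetlbPages' /proc/self/status
	# head -n 20 /proc/self/smaps       # per-mapping memory consumption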
Ddax.txt11 For block devices that are memory-like, the page cache pages would be
33 that identifies the physical page for the memory. It also returns a
34 kernel virtual address that can be used to access the memory.
43 a large amount of memory through a smaller window, then you cannot
91 Calling get_user_pages() on a range of user memory that has been mmaped
94 reads/writes to those memory ranges from a non-DAX file will fail (note
95 that O_DIRECT reads/writes _of a DAX file_ do work, it is the memory
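A hedged sketch of putting a filesystem into DAX mode on a memory-like block device; /dev/pmem0 is an assumed DAX-capable device and ext4 is just one filesystem with DAX support:

	# mkfs.ext4 /dev/pmem0
	# mount -o dax /dev/pmem0 /mnt/pmem     # I/O goes straight to the device, bypassing the page cache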
/linux-4.4.14/drivers/media/v4l2-core/
Dvideobuf2-v4l2.c85 length = (b->memory == VB2_MEMORY_USERPTR || in __verify_length()
86 b->memory == VB2_MEMORY_DMABUF) in __verify_length()
100 length = (b->memory == VB2_MEMORY_USERPTR) in __verify_length()
167 if (b->memory != q->memory) { in vb2_queue_or_prepare_buf()
189 b->memory = vb->memory; in __fill_v4l2_buffer()
212 if (q->memory == VB2_MEMORY_MMAP) in __fill_v4l2_buffer()
214 else if (q->memory == VB2_MEMORY_USERPTR) in __fill_v4l2_buffer()
216 else if (q->memory == VB2_MEMORY_DMABUF) in __fill_v4l2_buffer()
228 if (q->memory == VB2_MEMORY_MMAP) in __fill_v4l2_buffer()
230 else if (q->memory == VB2_MEMORY_USERPTR) in __fill_v4l2_buffer()
[all …]
Dvideobuf2-core.c201 static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, in __vb2_queue_alloc() argument
221 vb->memory = memory; in __vb2_queue_alloc()
224 if (memory == VB2_MEMORY_MMAP) { in __vb2_queue_alloc()
251 if (memory == VB2_MEMORY_MMAP) in __vb2_queue_alloc()
275 if (q->memory == VB2_MEMORY_MMAP) in __vb2_free_mem()
277 else if (q->memory == VB2_MEMORY_DMABUF) in __vb2_free_mem()
394 q->memory = 0; in __vb2_queue_free()
500 enum vb2_memory memory, unsigned int type) in vb2_verify_memory_type() argument
502 if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR && in vb2_verify_memory_type()
503 memory != VB2_MEMORY_DMABUF) { in vb2_verify_memory_type()
[all …]
Dvideobuf-core.c327 b->memory = vb->memory; in videobuf_status()
328 switch (b->memory) { in videobuf_status()
386 enum v4l2_memory memory) in __videobuf_mmap_setup() argument
405 q->bufs[i]->memory = memory; in __videobuf_mmap_setup()
407 switch (memory) { in __videobuf_mmap_setup()
430 enum v4l2_memory memory) in videobuf_mmap_setup() argument
434 ret = __videobuf_mmap_setup(q, bcount, bsize, memory); in videobuf_mmap_setup()
446 if (req->memory != V4L2_MEMORY_MMAP && in videobuf_reqbufs()
447 req->memory != V4L2_MEMORY_USERPTR && in videobuf_reqbufs()
448 req->memory != V4L2_MEMORY_OVERLAY) { in videobuf_reqbufs()
[all …]
/linux-4.4.14/Documentation/devicetree/bindings/edac/
Dapm-xgene-edac.txt6 memory controller - Memory controller
16 - regmap-mcba : Regmap of the MCB-A (memory bridge) resource.
17 - regmap-mcbb : Regmap of the MCB-B (memory bridge) resource.
23 Required properties for memory controller subnode:
25 - reg : First resource shall be the memory controller unit
27 - memory-controller : Instance number of the memory controller.
84 memory-controller = <0>;
/linux-4.4.14/Documentation/arm64/
Dmemory.txt6 This document describes the virtual memory layout used by the AArch64
14 virtual address, are used but the memory layout is the same.
24 AArch64 Linux memory layout with 4KB pages + 3 levels:
32 AArch64 Linux memory layout with 4KB pages + 4 levels:
40 AArch64 Linux memory layout with 64KB pages + 2 levels:
48 AArch64 Linux memory layout with 64KB pages + 3 levels:
56 For details of the virtual kernel memory layout please see the kernel
/linux-4.4.14/drivers/base/
Dmemory.c593 int register_memory(struct memory_block *memory) in register_memory() argument
595 memory->dev.bus = &memory_subsys; in register_memory()
596 memory->dev.id = memory->start_section_nr / sections_per_block; in register_memory()
597 memory->dev.release = memory_block_release; in register_memory()
598 memory->dev.groups = memory_memblk_attr_groups; in register_memory()
599 memory->dev.offline = memory->state == MEM_OFFLINE; in register_memory()
601 return device_register(&memory->dev); in register_memory()
604 static int init_memory_block(struct memory_block **memory, in init_memory_block() argument
627 *memory = mem; in init_memory_block()
686 unregister_memory(struct memory_block *memory) in unregister_memory() argument
[all …]
/linux-4.4.14/arch/metag/boot/dts/
Dskeleton.dtsi3 * add a compatible value. The bootloader will typically populate the memory
13 memory { device_type = "memory"; reg = <0 0>; };
/linux-4.4.14/arch/microblaze/mm/
Dinit.c127 for_each_memblock(memory, reg) { in setup_memory()
189 for_each_memblock(memory, reg) { in setup_memory()
196 &memblock.memory, 0); in setup_memory()
292 memblock.memory.regions[0].size = memory_size; in mm_cmdline_setup()
335 if ((u32) memblock.memory.regions[0].size < 0x400000) { in mmu_init()
340 if ((u32) memblock.memory.regions[0].size < kernel_tlb) { in mmu_init()
346 memory_start = (u32) memblock.memory.regions[0].base; in mmu_init()
347 lowmem_size = memory_size = (u32) memblock.memory.regions[0].size; in mmu_init()
/linux-4.4.14/Documentation/s390/
Dzfcpdump.txt6 not overwrite memory of the crashed Linux with data of the dump tool, the
7 hardware saves some memory plus the register sets of the boot CPU before the
9 memory afterwards. Currently 32 MB are saved.
12 a user space dump tool, which are loaded together into the saved memory region
19 which exports memory and registers of the crashed Linux in an s390
21 dump format defines a 4K header followed by plain uncompressed memory. The
25 memory, which has been saved by hardware is read by the driver via the SCLP
27 memory.
/linux-4.4.14/arch/mips/cavium-octeon/
Dsetup.c897 int64_t memory; in plat_mem_setup() local
935 memory = cvmx_bootmem_phy_alloc(mem_alloc_size, in plat_mem_setup()
939 if (memory >= 0) { in plat_mem_setup()
952 &memory, &size); in plat_mem_setup()
955 &memory, &size); in plat_mem_setup()
957 end = memory + mem_alloc_size; in plat_mem_setup()
964 if (memory < crashk_base && end > crashk_end) { in plat_mem_setup()
966 add_memory_region(memory, in plat_mem_setup()
967 crashk_base - memory, in plat_mem_setup()
969 total += crashk_base - memory; in plat_mem_setup()
[all …]
/linux-4.4.14/arch/mips/sgi-ip27/
DKconfig11 for more memory. Your hardware is almost certainly running in
19 for more memory. Your hardware is almost certainly running in
28 Change the way a Linux kernel is loaded into memory on a MIPS64
38 nodes in a NUMA cluster. This trades memory for speed.
45 across multiple nodes in a NUMA cluster. This trades memory for
/linux-4.4.14/Documentation/devicetree/bindings/dma/
Darm-pl330.txt3 The ARM PrimeCell PL330 DMA controller can move blocks of memory contents
4 between memory and peripherals or memory to memory.
8 - reg: physical base address of the controller and length of memory mapped
Dmpc512x-dma.txt4 blocks of memory contents between memory and peripherals or
5 from memory to memory.
/linux-4.4.14/Documentation/usb/
Ddma.txt42 memory. They work like kmalloc and kfree versions that give you the right
53 to use this type of memory ("dma-coherent"), and memory returned from
56 The memory buffer returned is "dma-coherent"; sometimes you might need to
57 force a consistent memory access ordering by using memory barriers. It's
66 On most systems the memory returned will be uncached, because the
67 semantics of dma-coherent memory require either bypassing CPU caches
72 - Devices on some EHCI controllers could handle DMA to/from high memory.
81 high memory to "normal" DMA memory. If you can come up with a good way
82 to fix this issue (for x86_32 machines with over 1 GByte of memory),
91 of Documentation/DMA-API-HOWTO.txt, titled "What memory is DMA-able?")
/linux-4.4.14/Documentation/devicetree/bindings/media/
Ds5p-mfc.txt16 - reg : Physical base address of the IP registers and length of memory
24 - samsung,mfc-r : Base address of the first memory bank used by MFC
25 for DMA contiguous memory allocation and its size.
27 - samsung,mfc-l : Base address of the second memory bank used by MFC
28 for DMA contiguous memory allocation and its size.
/linux-4.4.14/arch/arm64/boot/dts/hisilicon/
Dhi6220-hikey.dts10 /*Reserved 1MB memory for MCU*/
30 memory@0 {
31 device_type = "memory";
/linux-4.4.14/arch/metag/mm/
DKconfig46 lower latencies than main memory. This enables support for
48 memory policies to be used for prioritizing and controlling
56 The kernel memory allocator divides physically contiguous memory
59 keeps in the memory allocator. If you need to allocate very large
60 blocks of physically contiguous memory, then you may need to
64 a value of 11 means that the largest free memory block is 2^10 pages.
/linux-4.4.14/arch/powerpc/platforms/pseries/
DKconfig97 bool "Support for shared-memory logical partitions"
102 Select this option to enable shared memory partition support.
104 memory than physically available and will allow firmware to
105 balance memory across many LPARs.
108 tristate "Collaborative memory management"
113 to reduce the memory size of the system. This is accomplished
114 by allocating pages of memory and put them "on hold". This only
117 balance memory across many LPARs.
/linux-4.4.14/arch/arm/include/debug/
Dvexpress.S28 @ Make an educated guess regarding the memory map:
38 @ Original memory map
43 @ RS1 memory map
/linux-4.4.14/Documentation/devicetree/bindings/memory-controllers/ti/
Demif.txt6 of the EMIF IP and memory parts attached to it.
18 - device-handle : phandle to a "lpddr2" node representing the memory part
26 instance has a memory part attached to it. If there is a memory
28 so there is no need to give the details of this memory part.
/linux-4.4.14/Documentation/mic/
Dscif_overview.txt18 6. Remote memory mapping for low latency CPU accesses via mmap
23 nodes in a SCIF PCIe "network" to share memory "windows" and to communicate. A
27 can also register local memory which is followed by data transfer using either
28 DMA, CPU copies or remote memory mapping via mmap. SCIF supports both user and
60 SCIF allows memory sharing via mmap(..) between processes on different PCIe
84 /* Register memory */
95 /* Access remote registered memory */
/linux-4.4.14/Documentation/networking/
Dnetlink_mmap.txt1 This file documents how to use memory mapped I/O with netlink.
18 user-space memory without copying them as done with regular socket I/O,
22 The TX ring is used to process messages directly from user-space memory, the
29 In order to use memory mapped netlink I/O, user-space needs three main changes:
51 On kernel side, in order to make use of memory mapped I/O on receive, the
52 originating netlink subsystem needs to support memory mapped I/O, otherwise
55 Dumps of kernel databases automatically support memory mapped I/O.
58 use memory from the TX ring instead of (usually) a buffer declared on the
62 Structured and definitions for using memory mapped I/O are contained in
68 Each ring contains a number of continuous memory blocks, containing frames of
[all …]
/linux-4.4.14/arch/avr32/mach-at32ap/
DKconfig6 prompt "AT32AP700x static memory bus width"
10 Define the width of the AP7000 external static memory interface.
14 The current code can only support a single external memory bus
/linux-4.4.14/arch/frv/
DKconfig82 bool "High memory support"
86 If you wish to use more than 256MB of memory with your MMU based
88 the memory between 0xC0000000 and 0xD0000000 directly... everything
98 The VM uses one page of memory for each page table. For systems
99 with a lot of RAM, this can be wasteful of precious low memory.
100 Setting this option will put user-space page tables in high memory.
154 prevent userspace accessing the underlying memory directly.
170 one there; it will write directly to memory instead.
172 Write-Through caching only fetches cachelines from memory on a
173 read. Writes always get written directly to memory. If the affected
[all …]
/linux-4.4.14/arch/tile/
DKconfig176 smaller kernel memory footprint results from using a smaller
184 performance on memory-intensive applications, a page size of 64KB
187 memory more efficiently at some cost in TLB performance.
253 amount of physical memory, not all of it can be "permanently
254 mapped" by the kernel. The physical memory that's not
255 permanently mapped is called "high memory".
260 physical memory into the top 1 GB of virtual memory space.
286 NUMA memory allocation is required for TILE processors
287 unless booting with memory striping enabled in the
288 hypervisor, or with only a single memory controller.
[all …]
/linux-4.4.14/Documentation/kdump/
Dkdump.txt12 dump of the system kernel's memory needs to be taken (for example, when
13 the system panics). The system kernel's memory image is preserved across
17 memory image to a dump file on the local disk, or across the network to
23 When the system kernel boots, it reserves a small section of memory for
27 memory.
29 On x86 machines, the first 640 KB of physical memory is needed to boot,
33 Similarly on PPC64 machines first 32KB of physical memory is needed for
35 size kexec backs up the first 64KB memory.
43 encoded in the ELF format, and stored in a reserved area of memory
50 With the dump-capture kernel, you can access the memory image through
[all …]
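A rough sketch of the reserve/load/capture steps the excerpt outlines; the 128M reservation, kernel image paths and append string are illustrative only:

	# boot the system kernel with a reserved region, e.g. crashkernel=128M

	# load the dump-capture kernel into that region:
	# kexec -p /boot/vmlinuz-capture --initrd=/boot/initrd-capture \
	        --append="root=/dev/sda1 maxcpus=1 irqpoll"

	# after a panic, the capture kernel exposes the old memory image:
	# cp /proc/vmcore /var/crash/vmcore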
/linux-4.4.14/arch/cris/arch-v10/
DREADME.mm22 First version of CRIS/MMU memory layout specification.
35 segmentation of the kernel memory space. We use this feature to avoid having
36 to use page-tables to map the physical memory into the kernel's address
43 registers are needed for each memory access to specify which MMU space to
47 internal chip I/O registers and the flash memory area (including SRAM
91 and shrinking the user-mode memory space.
95 memory.
101 The kernel also needs its own virtual memory space. That is kseg_d. It
103 chunks of memory not possible using the normal kmalloc physical RAM
155 The paging mechanism uses virtual addresses to split a process memory-space into
[all …]
/linux-4.4.14/Documentation/ABI/stable/
Dsysfs-devices-node17 Nodes that have regular memory.
29 Nodes that have regular or high memory.
56 Provides information about the node's distribution and memory
77 The node's zoned virtual memory statistics.
84 When this file is written to, all memory within that node
85 will be compacted. When it completes, memory will be freed
/linux-4.4.14/drivers/mtd/maps/
Dgpio-addr-flash.c208 struct resource *memory; in gpio_flash_probe() local
213 memory = platform_get_resource(pdev, IORESOURCE_MEM, 0); in gpio_flash_probe()
216 if (!memory || !gpios || !gpios->end) in gpio_flash_probe()
231 state->win_size = resource_size(memory); in gpio_flash_probe()
241 state->map.virt = ioremap_nocache(memory->start, state->map.size); in gpio_flash_probe()
262 state->mtd = do_map_probe(memory->name, &state->map); in gpio_flash_probe()
Dbfin-async-flash.c130 struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0); in bfin_flash_probe() local
144 state->map.size = resource_size(memory); in bfin_flash_probe()
145 state->map.virt = (void __iomem *)memory->start; in bfin_flash_probe()
146 state->map.phys = memory->start; in bfin_flash_probe()
160 state->mtd = do_map_probe(memory->name, &state->map); in bfin_flash_probe()
/linux-4.4.14/arch/sh/mm/
DKconfig7 bool "Support for memory management hardware"
34 The kernel memory allocator divides physically contiguous memory
37 keeps in the memory allocator. If you need to allocate very large
38 blocks of physically contiguous memory, then you may need to
42 a value of 11 means that the largest free memory block is 2^10 pages.
48 hex "Physical memory start address"
55 The physical memory (RAM) start address will be automatically
64 hex "Physical memory size"
67 This sets the default memory size assumed by your SH kernel. It can
120 memory policies to be used for prioritizing and controlling
/linux-4.4.14/arch/xtensa/
DKconfig143 bool "Unaligned memory access in use space"
146 memory accesses in hardware but through an exception handler.
147 Per default, unaligned memory accesses are disabled in user space.
149 Say Y here to enable unaligned memory access in user space.
211 to mapping the MMU and after mapping even if the area of low memory
229 lowermost 128 MB of memory linearly to the areas starting
231 When there are more than 128 MB memory in the system not
233 The physical memory that's not permanently mapped is called
234 "high memory".
350 memory size and the root device (e.g., mem=64M root=/dev/nfs).
[all …]
/linux-4.4.14/Documentation/scsi/
Dhptiop.txt85 A request packet can be allocated in either IOP or host memory.
90 allocate a free request in host DMA coherent memory.
95 Requests allocated in host memory must be aligned on a 32-byte boundary.
100 allocated in IOP memory, write the offset to inbound queue port. For
101 requests allocated in host memory, write (0x80000000|(bus_addr>>5))
108 For requests allocated in IOP memory, the request offset is posted to
111 For requests allocated in host memory, (0x80000000|(bus_addr>>5))
118 For requests allocated in IOP memory, the host driver frees the request
133 - Allocate a free request in host DMA coherent memory.
135 Requests allocated in host memory must be aligned on a 32-byte boundary.
/linux-4.4.14/Documentation/blockdev/
Dzram.txt8 in memory itself. These disks allow very fast I/O and compression provides
9 good amounts of memory savings. Some of the usecases include /tmp storage,
40 -ENOMEM -- zram was not able to allocate enough memory to fulfil your
110 There is little point creating a zram of greater than twice the size of memory
114 5) Set memory limit: Optional
115 Set memory limit by writing the value to sysfs node 'mem_limit'.
119 # limit /dev/zram0 with 50MB memory
127 # To disable memory limit
177 mem_used_total RO the amount of memory allocated for this disk
178 mem_used_max RW the maximum amount of memory zram has consumed to
[all …]
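A short sketch of the mem_limit knob the fragments above refer to; zram0 and the 50M figure mirror the documentation's own example:

	# echo 50M > /sys/block/zram0/mem_limit     # cap compressed memory use at 50 MB
	# echo 0 > /sys/block/zram0/mem_limit       # writing 0 disables the limit
	# cat /sys/block/zram0/mem_used_total       # current memory allocated for the disk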
/linux-4.4.14/Documentation/virtual/kvm/devices/
Dvm.txt29 Parameters: in attr->addr the address for the new limit of guest memory
32 -E2BIG if the given guest memory is too big for that machine
34 -ENOMEM if not enough memory is available for a new shadow guest mapping
38 the maximum guest memory size. The limit will be rounded up to
60 -ENOMEM if not enough memory is available to process the ioctl
84 -ENOMEM if not enough memory is available to process the ioctl
/linux-4.4.14/Documentation/arm/
Dvlocks.txt5 mechanism, with reasonable but minimal requirements on the memory
14 vlocks make use of the atomicity provided by the memory system for
15 writes to a single memory location. To arbitrate, every CPU "votes for
16 itself", by storing a unique number to a common memory location. The
17 final value seen in that memory location when all the votes have been
71 However, once the election has started, the underlying memory system
127 reduces the number of round-trips required to external memory.
149 The optimisation relies on the fact that the ARM memory system
150 guarantees coherency between overlapping memory accesses of
171 when executing the algorithm in cached memory.
[all …]
Duefi.txt40 When booting in UEFI mode, the stub deletes any memory nodes from a provided DT.
41 Instead, the kernel reads the UEFI memory map.
50 linux,uefi-mmap-start | 64-bit | Physical address of the UEFI memory map,
53 linux,uefi-mmap-size | 32-bit | Size in bytes of the UEFI memory map
57 | | memory map.
DIXP4xx16 supports faster speeds, new memory and flash configurations, and more
69 The IXP4xx family allows for up to 256MB of memory but the PCI interface
70 can only expose 64MB of that memory to the PCI bus. This means that if
76 IXP4xx provides two methods of accessing PCI memory space:
82 limits the system to just 64MB of PCI memory. This can be
83 problematic if using video cards and other memory-heavy devices.
85 2) If > 64MB of memory space is required, the IXP4xx can be
87 for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus.
95 you need more PCI memory, enable the IXP4XX_INDIRECT_PCI config option.
/linux-4.4.14/Documentation/xtensa/
Datomctl.txt6 can do Atomic Transactions to the memory internally.
22 use the memory controller's RCW, though non-MX controllers likely
26 Virtually all customers buy their memory controllers from vendors that
27 don't support atomic RCW memory transactions and will likely want to
/linux-4.4.14/arch/mn10300/kernel/
Dgdb-low.S27 # GDB stub read memory with guard
28 # - D0 holds the memory address to read
70 # GDB stub write memory with guard
72 # - D1 holds the memory address to write
/linux-4.4.14/drivers/mtd/onenand/
DKconfig31 via the GPMC memory controller.
44 One Block of the NAND Flash Array memory is reserved as
45 a One-Time Programmable Block memory area.
49 operations as any other NAND Flash Array memory block.
59 Flash memory array, these two component enables simultaneous program
/linux-4.4.14/Documentation/virtual/kvm/
Dmsr.txt18 data: 4-byte alignment physical address of a memory area which must be
19 in guest RAM. This memory is expected to hold a copy of the following
52 data: 4-byte aligned physical address of a memory area which must be in
53 guest RAM, plus an enable bit in bit 0. This memory is expected to hold
168 64 byte memory area which must be in guest RAM and must be
174 First 4 byte of 64 byte memory location will be written to by
203 data: 64-byte alignment physical address of a memory area which must be
204 in guest RAM, plus an enable bit in bit 0. This memory is expected to
239 physical address of a 4 byte memory area which must be in guest RAM and
242 The first, least significant bit of 4 byte memory location will be
[all …]
/linux-4.4.14/Documentation/namespaces/
Dresource-control.txt7 Therefore it is recommended that memory control groups be enabled in
9 that userspace configure memory control groups to limit how much
10 memory users they don't trust to play nice can use.
/linux-4.4.14/arch/xtensa/boot/dts/
Dlx60.dts7 memory@0 {
8 device_type = "memory";
Dml605.dts7 memory@0 {
8 device_type = "memory";
Dlx200mx.dts7 memory@0 {
8 device_type = "memory";
Dkc705.dts10 memory@0 {
11 device_type = "memory";
Dkc705_nommu.dts10 memory@0 {
11 device_type = "memory";
/linux-4.4.14/Documentation/dmaengine/
Dprovider.txt32 memory copy operation, but our audio device could have a narrower FIFO
38 or destination, can group the reads or writes in memory into a buffer,
39 so instead of having a lot of small memory accesses, which is not
78 These were just the general memory-to-memory (also called mem2mem) or
79 memory-to-device (mem2dev) kind of transfers. Most devices often
80 support other kind of transfers or memory operations that dmaengine
87 async TX API, to offload operations such as memory copy, XOR,
88 cryptography, etc., basically any memory to memory operation.
90 Over time, the need for memory to device transfers arose, and
110 structure. Any of the usual memory allocators will do, but you'll also
[all …]
/linux-4.4.14/drivers/edac/
DKconfig19 memory errors, cache errors, PCI errors, thermal throttling, etc..
69 memory. EDAC can report statistics on memory error
72 occurred so that a particular failing memory module can be
103 the AMD64 families (>= K8) of memory controllers.
114 When enabled, in each of the respective memory controller directories
151 82443BX/GX memory controllers (440BX/GX chipsets).
277 tristate "Cell Broadband Engine memory controller"
281 Cell Broadband Engine internal memory controller
288 This enables support for EDAC on the ECC memory used
289 with the IBM DDR2 memory controller found in various
[all …]
/linux-4.4.14/Documentation/devicetree/bindings/mtd/
Dnxp-spifi.txt5 mode 0 or 3. The controller operates in either command or memory
6 mode. In memory mode the Flash is accessible from the CPU as
7 normal memory.
12 the second contains the memory mapping address and length
Dfsl-quadspi.txt7 the second contains the memory mapping address and length
8 - reg-names: Should contain the reg names "QuadSPI" and "QuadSPI-memory"
27 reg-names = "QuadSPI", "QuadSPI-memory";
/linux-4.4.14/arch/c6x/
DKconfig91 The kernel memory allocator divides physically contiguous memory
94 keeps in the memory allocator. If you need to allocate very large
95 blocks of physically contiguous memory, then you may need to
99 a value of 11 means that the largest free memory block is 2^10 pages.
106 hex "Virtual address of memory base"
/linux-4.4.14/arch/m68k/ifpsp060/
Dos.S57 | Each IO routine checks to see if the memory write/read is to/from user
76 | Writes to data memory while in supervisor mode.
107 | Reads from data/instruction memory while in supervisor mode.
140 | Read a data byte from user memory.
163 | Read a data word from user memory.
174 | Read an instruction word from user memory.
210 | Read an instruction longword from user memory.
234 | Write a data byte to user memory.
256 | Write a data word to user memory.
280 | Write a data longword to user memory.
/linux-4.4.14/Documentation/x86/
Dpat.txt4 x86 Page Attribute Table (PAT) allows for setting the memory attribute at the
6 for setting of memory types over physical address ranges. However, PAT is
10 not having memory type aliasing for the same physical memory with multiple
13 PAT allows for different types of memory attributes. The most commonly used
21 There are many different APIs in the kernel that allow setting of memory
24 their intended usage and their memory attribute relationships. Internally,
121 combine areas of IO memory desired to remain uncacheable with areas where
124 nevertheless discouraged as the effective memory type is considered
130 MTRR Non-PAT PAT Linux ioremap value Effective memory type
/linux-4.4.14/sound/isa/gus/
Dgus_pcm.c50 unsigned int memory; member
137 begin = pcmp->memory + voice * (pcmp->dma_size / runtime->channels); in snd_gf1_pcm_trigger_up()
229 end = pcmp->memory + (((pcmp->bpos + 1) * pcmp->block_size) / runtime->channels); in snd_gf1_pcm_interrupt_wave()
262 snd_gf1_pcm_block_change(pcmp->substream, end, pcmp->memory + (end / 2), pcmp->block_size / 2); in snd_gf1_pcm_interrupt_wave()
263 …snd_gf1_pcm_block_change(pcmp->substream, end + (pcmp->block_size / 2), pcmp->memory + (pcmp->dma_… in snd_gf1_pcm_interrupt_wave()
265 snd_gf1_pcm_block_change(pcmp->substream, end, pcmp->memory + end, pcmp->block_size); in snd_gf1_pcm_interrupt_wave()
375 return snd_gf1_pcm_block_change(substream, bpos, pcmp->memory + bpos, len); in snd_gf1_pcm_playback_copy()
382 …if ((err = snd_gf1_pcm_poke_block(gus, runtime->dma_area + bpos, pcmp->memory + bpos, len, w16, in… in snd_gf1_pcm_playback_copy()
405 return snd_gf1_pcm_block_change(substream, bpos, pcmp->memory + bpos, len); in snd_gf1_pcm_playback_silence()
412 …if ((err = snd_gf1_pcm_poke_block(gus, runtime->dma_area + bpos, pcmp->memory + bpos, len, w16, in… in snd_gf1_pcm_playback_silence()
[all …]
/linux-4.4.14/arch/mips/boot/dts/ralink/
Drt3883_eval.dts9 memory@0 {
10 device_type = "memory";
Dmt7620a_eval.dts9 memory@0 {
10 device_type = "memory";
/linux-4.4.14/arch/mips/boot/dts/brcm/
Dbcm9ejtagprb.dts9 memory@0 {
10 device_type = "memory";
Dbcm96368mvwg.dts9 memory@0 {
10 device_type = "memory";
Dbcm97125cbmb.dts9 memory@0 {
10 device_type = "memory";
Dbcm97358svmb.dts9 memory@0 {
10 device_type = "memory";
Dbcm97420c.dts9 memory@0 {
10 device_type = "memory";
Dbcm97360svmb.dts9 memory@0 {
10 device_type = "memory";
/linux-4.4.14/arch/s390/
DKconfig.debug13 disabled, you allow userspace access to all memory, including
14 kernel and userspace memory. Accidental memory access is likely
/linux-4.4.14/drivers/firmware/efi/
DKconfig49 Export efi runtime memory maps to /sys/firmware/efi/runtime-map.
50 That memory map is used for example by kexec to set up efi virtual
56 bool "Enable EFI fake memory map"
62 to specific memory range by updating original (firmware provided)
81 the EFI runtime support gets system table address, memory
/linux-4.4.14/arch/m68k/mm/
DMakefile8 obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o hwtest.o
10 obj-$(CONFIG_MMU_COLDFIRE) += kmap.o memory.o mcfmmu.o
/linux-4.4.14/arch/mips/boot/dts/mti/
Dsead3.dts18 memory {
19 device_type = "memory";
/linux-4.4.14/Documentation/devicetree/bindings/gpio/
Dgpio-mm-lantiq.txt1 Lantiq SoC External Bus memory mapped GPIO controller
4 only gpios. This driver configures a special memory address, which when
7 The node describing the memory mapped GPIOs needs to be a child of the node
/linux-4.4.14/Documentation/fb/
Dintelfb.txt35 select amount of system RAM in MB to allocate for the video memory
42 select at what offset in MB of the logical memory to allocate the
43 framebuffer memory. The intent is to avoid the memory blocks
66 enable MTRR. This allows data transfers to the framebuffer memory
68 Not very helpful with the intel chips because of 'shared memory'.
Dtridentfb.txt47 memsize - integer value in KB, use if your card's memory size is misdetected.
51 more memory than it actually has. For instance mine is 192K less than
53 Only use if your video memory is taken from main memory hence of
55 If in some modes which barely fit the memory you see garbage
/linux-4.4.14/Documentation/acpi/apei/
Deinj.txt87 error type is memory related type, the param1 should be a valid
88 physical memory address. [Unless "flag" is set - see above]
92 Same use as param1 above. For example, if error type is of memory
93 related type, then param2 should be a physical memory address mask.
108 error in some other context by a simple access to the CPU, memory
117 for memory injections to be specified by the param1 and param2 files in
131 For memory errors (type 0x8, 0x10 and 0x20) the address is set using
162 # echo 0x12345000 > param1 # Set memory address for injection
164 # echo 0x8 > error_type # Choose correctable memory error
174 [22716.616173] EDAC MC3: 1 CE memory read error on CPU_SrcID#0_Channel#0_DIMM#0 (channel:0 slot:0 p…
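A hedged end-to-end example of the correctable memory error injection shown piecemeal above; the address and mask values are illustrative:

	# cd /sys/kernel/debug/apei/einj
	# echo 0x8 > error_type                     # correctable memory error
	# echo 0x12345000 > param1                  # physical address to hit (illustrative)
	# echo 0xfffffffffffff000 > param2          # address mask covering one 4K page
	# echo 1 > error_inject                     # trigger the injection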
/linux-4.4.14/arch/arm/
DKconfig-nommu43 occupied by read-only memory depending on H/W design.
45 If the region contains read-write memory, say 'n' here.
48 vectors to be mapped to writable memory, say 'n' here.
61 memory.
/linux-4.4.14/Documentation/devicetree/bindings/net/
Dmdio-mux-mmioreg.txt1 Properties for an MDIO bus multiplexer controlled by a memory-mapped device
3 This is a special case of a MDIO bus multiplexer. A memory-mapped device,
5 node must be a child of the memory-mapped device. The driver currently only
23 The FPGA node defines a memory-mapped FPGA with a register space of 0x30 bytes.
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/bios/
Dperf.c105 info->memory = nvbios_rd32(bios, perf + 0x05) * 20; in nvbios_perfEp()
121 info->memory = nvbios_rd16(bios, perf + 0x0b) * 1000; in nvbios_perfEp()
124 info->memory = nvbios_rd16(bios, perf + 0x0b) * 2000; in nvbios_perfEp()
133 info->memory = nvbios_rd16(bios, perf + 0x0c) * 1000; in nvbios_perfEp()
142 info->memory = nvbios_rd16(bios, perf + 0x0c) * 1000; in nvbios_perfEp()
/linux-4.4.14/arch/x86/um/
DKconfig50 memory. All the memory that can't be mapped directly will be treated
51 as high memory.
/linux-4.4.14/arch/h8300/boot/dts/
Dh8300h_sim.dts38 memory@400000 {
39 device_type = "memory";
59 bsc: memory-controller@fee01e {
Dh8s_sim.dts44 memory@400000 {
45 device_type = "memory";
65 bsc: memory-controller@fffec0 {
/linux-4.4.14/arch/mips/boot/dts/ingenic/
Dci20.dts19 memory {
20 device_type = "memory";
/linux-4.4.14/Documentation/power/
Dstates.txt38 support, or it can be used in addition to Suspend-to-RAM (memory sleep)
64 system is put into a low-power state, except for memory, which should be placed
73 System and device state is saved and kept in memory. All devices are suspended
89 of writing memory contents to disk. On resume, this is read and memory
96 the firmware will also handle restoring memory contents on resume.
99 to write memory contents to free swap space. swsusp has some restrictive
105 Once memory state is written to disk, the system may either enter a
Duserland-swsusp.txt10 utilities that will read/write the system memory snapshot from/to the
39 SNAPSHOT_CREATE_IMAGE - create a snapshot of the system memory; the
42 creating the snapshot (1) or after restoring the system memory state
48 SNAPSHOT_ATOMIC_RESTORE - restore the system memory state from the
50 the system memory snapshot back to the kernel using the write()
54 SNAPSHOT_FREE - free memory allocated for the snapshot image
101 - you cannot read() more than one virtual memory page at a time
106 The device's write() operation is used for uploading the system memory snapshot
109 The release() operation frees all memory allocated for the snapshot image
140 The suspending and resuming utilities MUST lock themselves in memory,
[all …]
/linux-4.4.14/drivers/staging/goldfish/
DREADME9 - Use dma coherent memory not kmalloc/__pa for the memory (this is just
/linux-4.4.14/drivers/mtd/devices/
DKconfig10 from Ramix Inc. <http://www.ramix.com/products/memory/pmc551.html>.
11 These devices come in memory configurations from 32M - 1G. If you
15 the size of the aperture window pointing into the devices memory.
17 will use a 1G memory map as its view of the device. As a module,
18 you can select a 1M window into the memory and the driver will
19 "slide" the window around the PMC551's memory. This was
29 break other memory configurations. If unsure say N.
126 If your CPU cannot cache all of the physical memory in your machine,
135 Use this driver to access physical memory that the kernel proper
136 doesn't have access to, memory beyond the mem=xxx limit, nvram,
[all …]
/linux-4.4.14/drivers/media/usb/gspca/
Dgspca.c501 enum v4l2_memory memory, unsigned int count) in frame_alloc() argument
518 gspca_dev->memory = memory; in frame_alloc()
528 frame->v4l2_buf.memory = memory; in frame_alloc()
553 gspca_dev->memory = GSPCA_MEMORY_NO; in frame_free()
1376 i = rb->memory; /* (avoid compilation warning) */ in vidioc_reqbufs()
1388 if (gspca_dev->memory != GSPCA_MEMORY_NO in vidioc_reqbufs()
1389 && gspca_dev->memory != GSPCA_MEMORY_READ in vidioc_reqbufs()
1390 && gspca_dev->memory != rb->memory) { in vidioc_reqbufs()
1417 if (gspca_dev->memory == GSPCA_MEMORY_READ) in vidioc_reqbufs()
1426 ret = frame_alloc(gspca_dev, file, rb->memory, rb->count); in vidioc_reqbufs()
[all …]
/linux-4.4.14/arch/powerpc/
DKconfig321 bool "High memory support"
415 bool "Add support for memory hwpoison"
454 while preserving memory contents. Firmware-assisted dump
514 # Some NUMA nodes have memory ranges that span
543 overhead. However the utilization of memory will increase.
601 The kernel memory allocator divides physically contiguous memory
604 keeps in the memory allocator. If you need to allocate very large
605 blocks of physically contiguous memory, then you may need to
609 a value of 11 means that the largest free memory block is 2^10 pages.
875 aspects of kernel memory management.
[all …]
/linux-4.4.14/arch/blackfin/
DKconfig386 the memory size and the root device (e.g., mem=8M, root=/dev/nfs).
395 of memory or you wish to reserve some memory at the beginning of
399 memory region is used to capture NULL pointer references as well
538 This sets the frequency of the DDR memory.
571 as the memory device datasheet.
755 into L1 instruction memory. (less latency)
763 (STORE/RESTORE CONTEXT) is linked into L1 instruction memory.
772 into L1 instruction memory. (less latency)
780 into L1 instruction memory. (less latency)
788 into L1 instruction memory. (less latency)
[all …]
/linux-4.4.14/scripts/coccinelle/misc/
Dbadty.cocci4 //# in memory allocation functions by checking the type of the allocated memory
6 //# to the memory being allocated. There are false positives in cases the
/linux-4.4.14/Documentation/devicetree/bindings/pci/
Dversatile.txt17 - ranges: ranges for the PCI memory and I/O regions
36 0x02000000 0 0x50000000 0x50000000 0 0x10000000 /* non-prefetchable memory */
37 0x42000000 0 0x60000000 0x60000000 0 0x10000000>; /* prefetchable memory */
