/linux-4.4.14/tools/vm/ |
D | slabinfo.c
      53: struct slabinfo *slab;                              member
     327: if (a->slab == find &&                              in find_one_alias()
    1091: a->slab = s;                                        in link_slabs()
    1110: if (!show_single_ref && a->slab->refs == 1)         in alias()
    1115: if (strcmp(a->slab->name, active) == 0) {           in alias()
    1120: printf("\n%-12s <- %s", a->slab->name, a->name);    in alias()
    1121: active = a->slab->name;                             in alias()
    1124: printf("%-15s -> %s\n", a->name, a->slab->name);    in alias()
    1154: static int slab_mismatch(char *slab)                in slab_mismatch() argument
    1156: return regexec(&pattern, slab, 0, NULL, 0);         in slab_mismatch()
    [all …]
|
/linux-4.4.14/Documentation/ABI/testing/ |
D | sysfs-kernel-slab
       1: What:  /sys/kernel/slab
       7: The /sys/kernel/slab directory contains a snapshot of the
      13: What:  /sys/kernel/slab/cache/aliases
      22: What:  /sys/kernel/slab/cache/align
      31: What:  /sys/kernel/slab/cache/alloc_calls
      42: What:  /sys/kernel/slab/cache/alloc_fastpath
      53: What:  /sys/kernel/slab/cache/alloc_from_partial
      59: The alloc_from_partial file shows how many times a cpu slab has
      60: been full and it has been refilled by using a slab from the list
      65: What:  /sys/kernel/slab/cache/alloc_refill
    [all …]
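These attributes are ordinary sysfs files and can be read like any other file. A minimal userspace sketch; the kmalloc-64 cache directory and its object_size attribute are assumptions about the running system (any cache directory under /sys/kernel/slab works):

    #include <stdio.h>

    int main(void)
    {
    	char buf[64];
    	/* object_size reports the payload size of each object in the cache. */
    	FILE *f = fopen("/sys/kernel/slab/kmalloc-64/object_size", "r");

    	if (!f) {
    		perror("fopen");
    		return 1;
    	}
    	if (fgets(buf, sizeof(buf), f))
    		printf("kmalloc-64 object size: %s", buf);
    	fclose(f);
    	return 0;
    }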
|
/linux-4.4.14/Documentation/vm/ |
D | slub.txt
       6: slab caches. SLUB always includes full debugging but it is off by default.
      34: slub_debug=<Debug-Options>,<slab name>
      46: caused higher minimum slab orders
      60: Red zoning and tracking may realign the slab. We can just apply sanity checks
      65: Debugging options may require the minimum possible slab order to increase as
      67: sizes). This has a higher likelihood of resulting in slab allocation errors
      77: /sys/kernel/slab/<slab name>/
      80: corresponding debug option. All options can be set on a slab that does
      81: not contain objects. If the slab already contains objects then sanity checks
      86: used on the wrong slab.
    [all …]
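To make the slub_debug=<Debug-Options>,<slab name> syntax quoted above concrete, a hedged example (the option letters follow slub.txt; dentry is just an example cache name):

    slub_debug=FZ,dentry

This boots with sanity checks (F) and red zoning (Z) enabled for the dentry cache only, leaving every other cache running at full speed.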
|
D | split_page_table_lock
      56: Make sure the architecture doesn't use slab allocator for page table
      57: allocation: slab uses page->slab_cache for its pages.
|
D | numa
     137: may revert to its own fallback path. The slab kernel memory allocator is an
|
/linux-4.4.14/tools/perf/Documentation/ |
D | perf-kmem.txt
      40: Sort the output (default: 'frag,hit,bytes' for slab and 'bytes,hit'
      42: pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
      44: mode selection options - i.e. --slab, --page, --alloc and/or --caller.
      53: --slab::
|
/linux-4.4.14/net/dccp/ |
D | ccid.c
      84: struct kmem_cache *slab;                            in ccid_kmem_cache_create() local
      91: slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,   in ccid_kmem_cache_create()
      93: return slab;                                        in ccid_kmem_cache_create()
      96: static void ccid_kmem_cache_destroy(struct kmem_cache *slab)   in ccid_kmem_cache_destroy() argument
      98: kmem_cache_destroy(slab);                           in ccid_kmem_cache_destroy()
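The excerpt shows the usual cache lifecycle: kmem_cache_create() at setup, kmem_cache_destroy() at teardown. A minimal, self-contained sketch of that pattern as a module (the struct and cache names are hypothetical, not part of ccid.c):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    /* Hypothetical object type; stands in for struct ccid above. */
    struct example_obj {
    	int id;
    	char payload[32];
    };

    static struct kmem_cache *example_cache;

    static int __init example_init(void)
    {
    	/* One cache per object type, named so it shows up in /sys/kernel/slab. */
    	example_cache = kmem_cache_create("example_cache",
    					  sizeof(struct example_obj),
    					  0, SLAB_HWCACHE_ALIGN, NULL);
    	return example_cache ? 0 : -ENOMEM;
    }

    static void __exit example_exit(void)
    {
    	/* All objects must have been freed back to the cache by now. */
    	kmem_cache_destroy(example_cache);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");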
|
/linux-4.4.14/block/ |
D | bio.c
      62: struct kmem_cache *slab;                            member
      74: struct kmem_cache *slab = NULL;                     in bio_find_or_create_slab() local
      85: if (!bslab->slab && entry == -1)                    in bio_find_or_create_slab()
      88: slab = bslab->slab;                                 in bio_find_or_create_slab()
      95: if (slab)                                           in bio_find_or_create_slab()
     114: slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,   in bio_find_or_create_slab()
     116: if (!slab)                                          in bio_find_or_create_slab()
     119: bslab->slab = slab;                                 in bio_find_or_create_slab()
     124: return slab;                                        in bio_find_or_create_slab()
     135: if (bs->bio_slab == bio_slabs[i].slab) {            in bio_put_slab()
    [all …]
|
/linux-4.4.14/include/net/ |
D | request_sock.h
      33: struct kmem_cache *slab;                            member
      87: req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN);   in reqsk_alloc()
     118: kmem_cache_free(req->rsk_ops->slab, req);           in reqsk_free()
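reqsk_alloc() draws objects from a per-protocol cache in atomic context, so the allocation cannot sleep and suppresses the allocation-failure warning. A hedged sketch of the same pattern (req_cache stands in for ops->slab; the helper names are hypothetical):

    #include <linux/slab.h>

    /* Allocate one object without sleeping; returns NULL quietly under pressure. */
    static void *alloc_one_atomic(struct kmem_cache *req_cache)
    {
    	return kmem_cache_alloc(req_cache, GFP_ATOMIC | __GFP_NOWARN);
    }

    /* Every object must go back to the cache it came from. */
    static void free_one(struct kmem_cache *req_cache, void *obj)
    {
    	kmem_cache_free(req_cache, obj);
    }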
|
D | sock.h
    1035: struct kmem_cache *slab;                            member
|
/linux-4.4.14/arch/ia64/include/asm/sn/ |
D | geo.h
      30: slabid_t slab:4;  /* slab (ASIC), 0 .. 15 within slot */   member
     115: INVALID_SLAB : g.common.slab;                       in geo_slab()
|
/linux-4.4.14/net/core/ |
D | sock.c
    1340: struct kmem_cache *slab;                            in sk_prot_alloc() local
    1342: slab = prot->slab;                                  in sk_prot_alloc()
    1343: if (slab != NULL) {                                 in sk_prot_alloc()
    1344: sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);   in sk_prot_alloc()
    1372: if (slab != NULL)                                   in sk_prot_alloc()
    1373: kmem_cache_free(slab, sk);                          in sk_prot_alloc()
    1381: struct kmem_cache *slab;                            in sk_prot_free() local
    1385: slab = prot->slab;                                  in sk_prot_free()
    1388: if (slab != NULL)                                   in sk_prot_free()
    1389: kmem_cache_free(slab, sk);                          in sk_prot_free()
    [all …]
|
/linux-4.4.14/arch/unicore32/mm/ |
D | init.c
      63: int shared = 0, cached = 0, slab = 0, i;            in show_mem() local
      87: slab++;                                             in show_mem()
      99: printk(KERN_DEFAULT "%d slab pages\n", slab);       in show_mem()
|
/linux-4.4.14/arch/ia64/sn/kernel/sn2/ |
D | sn_hwperf.c
      90: int *rack, int *bay, int *slot, int *slab)          in sn_hwperf_location_to_bpos() argument
      96: rack, &type, bay, slab) == 4)                       in sn_hwperf_location_to_bpos()
     100: rack, &type, bay, slot, slab) != 5)                 in sn_hwperf_location_to_bpos()
     111: int rack, bay, slot, slab;                          in sn_hwperf_geoid_to_cnode() local
     114: if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))   in sn_hwperf_geoid_to_cnode()
     129: slot == this_slot && slab == this_slab) {           in sn_hwperf_geoid_to_cnode()
|
/linux-4.4.14/Documentation/DocBook/ |
D | .kernel-api.xml.cmd
       2: …ib/crc-ccitt.c include/linux/idr.h lib/idr.c lib/idr.c include/linux/slab.h mm/slab.c mm/util.c ar…
|
/linux-4.4.14/Documentation/zh_CN/ |
D | magic-number.txt
      99: RED_MAGIC2    0x170fc2a5  (any)       mm/slab.c
     114: SLAB_C_MAGIC  0x4f17a36d  kmem_cache  mm/slab.c
     121: RED_MAGIC1    0x5a2cf071  (any)       mm/slab.c
|
/linux-4.4.14/Documentation/fault-injection/ |
D | fault-injection.txt
      12: injects slab allocation failures. (kmalloc(), kmem_cache_alloc(), ...)
     180: o Inject slab allocation failures into module init/exit code
     263: Run a command "make -C tools/testing/selftests/ run_tests" with injecting slab
     275: Same as above except to inject page allocation failure instead of slab
|
/linux-4.4.14/Documentation/ |
D | magic-number.txt
      99: RED_MAGIC2    0x170fc2a5  (any)       mm/slab.c
     114: SLAB_C_MAGIC  0x4f17a36d  kmem_cache  mm/slab.c
     121: RED_MAGIC1    0x5a2cf071  (any)       mm/slab.c
|
D | kmemcheck.txt
     203: kmemcheck in such a way that the slab caches which are under SLUB debugging
     211: slab cache, and with kmemcheck tracking all the other caches. This is advanced
     486: a - unallocated (memory has been allocated by the slab layer, but has not
     488: f - freed (memory has been allocated by the slab layer, but has been freed
     745: If a slab cache is set up using the SLAB_NOTRACK flag, it will never return
     748: If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
|
D | SubmitChecklist
      81: 21: Has been checked with injection of at least slab and page-allocation
|
D | nommu-mmap.txt
     189: of the mapping exceeds the size of the slab object currently occupied by the
     190: memory to which the mapping refers, or if a smaller slab object could be used.
|
D | BUG-HUNTING
     238: with this look at mm/slab.c and search for POISON_INUSE. When using this an
|
D | kernel-per-CPU-kthreads.txt
     179: CONFIG_SLAB=y, thus avoiding the slab allocator's periodic
|
D | kernel-parameters.txt
    2389: caches in the slab allocator. Saves per-node memory,
    3511: culprit if slab objects become corrupted. Enabling
    3524: The minimum number of objects per slab. SLUB will
    3525: increase the slab order up to slub_max_order to
    3526: generate a sufficiently large slab able to contain
|
/linux-4.4.14/Documentation/ja_JP/ |
D | SubmitChecklist
      97: 21: Has been checked with injection of at least slab and page-allocation
|
/linux-4.4.14/mm/ |
D | memory-failure.c
     779: #define slab (1UL << PG_slab)                       macro
     799: { slab, slab, MF_MSG_SLAB, me_kernel },
     831: #undef slab
|
D | Makefile
      52: obj-$(CONFIG_SLAB) += slab.o
|
D | slab.c
     270: #define MAKE_LIST(cachep, listp, slab, nodeid) \    argument
     273: list_splice(&get_node(cachep, nodeid)->slab, listp); \
|
D | Kconfig
     574: zsmalloc is a slab-based memory allocator designed to store
|
/linux-4.4.14/Documentation/trace/ |
D | events-kmem.txt
      22: justified, particularly if kmalloc slab pages are getting significantly
      36: of writing, no information is available on what slab is being allocated from,
|
/linux-4.4.14/tools/testing/fault-injection/ |
D | failcmd.sh
      58: inject slab allocation failures
|
/linux-4.4.14/drivers/scsi/ |
D | scsi_lib.c
      48: struct kmem_cache *slab;                            member
    2282: sgp->slab = kmem_cache_create(sgp->name, size, 0,   in scsi_init_queue()
    2284: if (!sgp->slab) {                                   in scsi_init_queue()
    2291: sgp->slab);                                         in scsi_init_queue()
    2306: if (sgp->slab)                                      in scsi_init_queue()
    2307: kmem_cache_destroy(sgp->slab);                      in scsi_init_queue()
    2323: kmem_cache_destroy(sgp->slab);                      in scsi_exit_queue()
|
/linux-4.4.14/Documentation/sysctl/ |
D | vm.txt
     190: reclaimable slab objects like dentries and inodes. Once dropped, their
     195: To free reclaimable slab objects (includes dentries and inodes):
     197: To free slab objects and pagecache:
     446: than this percentage of pages in a zone are reclaimable slab pages.
     447: This ensures that the slab growth stays under control even in NUMA
     452: Note that slab reclaim is triggered in a per zone / node fashion.
     453: The process of reclaiming slab memory is currently not node specific
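The drop_caches interface referenced above is a plain procfs file. A hedged userspace sketch (writing "2" asks the kernel to free reclaimable slab objects such as dentries and inodes; "1" drops the pagecache and "3" drops both; root is required):

    #include <stdio.h>

    int main(void)
    {
    	/* "2" = free reclaimable slab objects (dentries, inodes). */
    	FILE *f = fopen("/proc/sys/vm/drop_caches", "w");

    	if (!f) {
    		perror("fopen");
    		return 1;
    	}
    	fputs("2\n", f);
    	fclose(f);
    	return 0;
    }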
|
/linux-4.4.14/fs/befs/ |
D | ChangeLog
     159: * Made inode allocations use a slab cache
     170: * Fixed a couple of compile warnings due to use of malloc.h, when slab.h
     408: Needed to include <linux/slab.h> in most files
|
/linux-4.4.14/fs/nfsd/ |
D | state.h
     587: struct kmem_cache *slab);
|
D | nfs4state.c
     557: struct kmem_cache *slab)                            in nfs4_alloc_stid() argument
     562: stid = kmem_cache_zalloc(slab, GFP_KERNEL);         in nfs4_alloc_stid()
     591: kmem_cache_free(slab, stid);                        in nfs4_alloc_stid()
    3347: static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4…   in alloc_stateowner() argument
    3351: sop = kmem_cache_alloc(slab, GFP_KERNEL);           in alloc_stateowner()
    3357: kmem_cache_free(slab, sop);                         in alloc_stateowner()
|
/linux-4.4.14/include/linux/ |
D | page-flags.h
     217: __PAGEFLAG(Slab, slab)
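__PAGEFLAG() is a generator macro: from the single line above it emits a test/set/clear trio for the PG_slab bit. A rough, simplified sketch of what it expands to (the real macro in page-flags.h builds these through nested helper macros):

    /* Simplified expansion; the generated helpers operate on page->flags. */
    static inline int PageSlab(const struct page *page)
    {
    	return test_bit(PG_slab, &page->flags);
    }

    static inline void __SetPageSlab(struct page *page)
    {
    	/* Non-atomic: only safe while no one else can touch the flags. */
    	__set_bit(PG_slab, &page->flags);
    }

    static inline void __ClearPageSlab(struct page *page)
    {
    	__clear_bit(PG_slab, &page->flags);
    }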
|
D | bio.h
     739: struct kmem_cache *slab;                            member
|
/linux-4.4.14/Documentation/cgroups/ |
D | memory.txt
     117: 3. Kernel user memory accounting and slab control
     303: to trigger slab reclaim when those limits are reached.
     311: * slab pages: pages allocated by the SLAB or SLUB allocator are tracked. A copy
     314: skipped while the cache is being created. All objects in a slab page should
|
D | cpusets.txt
     179: - cpuset.memory_spread_slab flag: if set, spread slab cache evenly on allowed nodes
     316: then the kernel will spread some file system related slab caches,
     333: or slab caches to ignore the task's NUMA mempolicy and be spread
     355: PFA_SPREAD_SLAB, and appropriately marked slab caches will allocate
|
/linux-4.4.14/net/ipv6/ |
D | af_inet6.c
     170: WARN_ON(!answer_prot->slab);                        in inet6_create()
|
/linux-4.4.14/init/ |
D | Kconfig
    1699: SLUB sysfs support. /sys/slab will not exist and there will be
    1718: This option allows you to select a slab allocator.
    1723: The regular slab allocator that is established and known to work
    1730: SLUB is a slab allocator that minimizes cache line usage
    1735: a slab allocator.
|
/linux-4.4.14/Documentation/filesystems/ |
D | proc.txt
     752: The slabinfo file gives information about memory usage at the slab level.
     753: Linux uses slab pools for memory management above page level in version 2.2.
     754: Commonly used objects have their own slab pool (such as network buffers,
     868: slab will be reclaimable, due to items being in use. The
|
/linux-4.4.14/net/ipv4/ |
D | af_inet.c
     320: WARN_ON(!answer_prot->slab);                        in inet_create()
|
/linux-4.4.14/lib/ |
D | Kconfig.debug
     457: bool "Debug slab memory allocations"
     515: of finding leaks due to the slab objects poisoning.
|
/linux-4.4.14/Documentation/networking/ |
D | packet_mmap.txt
     310: a pool of pre-determined sizes. This pool of memory is maintained by the slab
|
/linux-4.4.14/ |
D | CREDITS
      14: D: SLOB slab allocator
    3429: D: slab, pipe, select.
|
/linux-4.4.14/Documentation/block/ |
D | biodoc.txt
     607: case of bio, these routines make use of the standard slab allocator.
|