/linux-4.1.27/tools/vm/ |
D | slabinfo.c | 53 struct slabinfo *slab; member 315 if (a->slab == find && in find_one_alias() 1065 a->slab = s; in link_slabs() 1084 if (!show_single_ref && a->slab->refs == 1) in alias() 1089 if (strcmp(a->slab->name, active) == 0) { in alias() 1094 printf("\n%-12s <- %s", a->slab->name, a->name); in alias() 1095 active = a->slab->name; in alias() 1098 printf("%-20s -> %s\n", a->name, a->slab->name); in alias() 1128 static int slab_mismatch(char *slab) in slab_mismatch() argument 1130 return regexec(&pattern, slab, 0, NULL, 0); in slab_mismatch() [all …]
|
/linux-4.1.27/Documentation/ABI/testing/ |
D | sysfs-kernel-slab | 1 What: /sys/kernel/slab 7 The /sys/kernel/slab directory contains a snapshot of the 13 What: /sys/kernel/slab/cache/aliases 22 What: /sys/kernel/slab/cache/align 31 What: /sys/kernel/slab/cache/alloc_calls 42 What: /sys/kernel/slab/cache/alloc_fastpath 53 What: /sys/kernel/slab/cache/alloc_from_partial 59 The alloc_from_partial file shows how many times a cpu slab has 60 been full and it has been refilled by using a slab from the list 65 What: /sys/kernel/slab/cache/alloc_refill [all …]
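Each of these attributes is an ordinary text file, so the snapshot can be read from userspace without special tooling. A minimal sketch in C; the cache name "kmalloc-64" and the "aliases" attribute are examples only, and any cache and attribute under /sys/kernel/slab/ is read the same way:

	/* Sketch: read one slab attribute from sysfs (cache name illustrative). */
	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		FILE *f = fopen("/sys/kernel/slab/kmalloc-64/aliases", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("aliases: %s", buf);
		fclose(f);
		return 0;
	}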
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/ |
D | obd_support.h | 758 #define OBD_SLAB_FREE_RTN0(ptr, slab) \ argument 760 kmem_cache_free((slab), (ptr)); \ 765 #define __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, cptab, cpt, size, type) \ argument 769 kmem_cache_alloc(slab, type | __GFP_ZERO) : \ 770 kmem_cache_alloc_node(slab, type | __GFP_ZERO, \ 776 OBD_SLAB_FREE_RTN0(ptr, slab)))) { \ 781 #define OBD_SLAB_ALLOC_GFP(ptr, slab, size, flags) \ argument 782 __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, NULL, 0, size, flags) 783 #define OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, flags) \ argument 784 __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, cptab, cpt, size, flags) [all …]
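Stripped of the Lustre bookkeeping, __OBD_SLAB_ALLOC_VERBOSE above picks between a plain zeroed slab allocation and a node-local one. A hedged sketch of that reading, with illustrative names; the real macro derives the target node from its CPU-partition table (cptab/cpt) rather than taking a NUMA node id directly:

	#include <linux/numa.h>
	#include <linux/slab.h>

	static void *example_zalloc_on(struct kmem_cache *slab, gfp_t flags, int node)
	{
		if (node == NUMA_NO_NODE)
			return kmem_cache_alloc(slab, flags | __GFP_ZERO);
		/* node-local variant, used when a specific partition is requested */
		return kmem_cache_alloc_node(slab, flags | __GFP_ZERO, node);
	}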
|
/linux-4.1.27/Documentation/vm/ |
D | slub.txt | 6 slab caches. SLUB always includes full debugging but it is off by default. 34 slub_debug=<Debug-Options>,<slab name> 46 caused higher minimum slab orders 60 Red zoning and tracking may realign the slab. We can just apply sanity checks 65 Debugging options may require the minimum possible slab order to increase as 67 sizes). This has a higher likelihood of resulting in slab allocation errors 77 /sys/kernel/slab/<slab name>/ 80 corresponding debug option. All options can be set on a slab that does 81 not contain objects. If the slab already contains objects then sanity checks 86 used on the wrong slab. [all …]
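A few illustrative invocations of the boot parameter documented above, assuming the standard SLUB option letters (F = sanity checks, Z = red zoning, P = poisoning); the cache name is an example only:

	slub_debug			enable all debugging for every slab cache
	slub_debug=FZ			sanity checks plus red zoning for all caches
	slub_debug=P,kmalloc-64		poisoning, restricted to the kmalloc-64 cache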
|
D | split_page_table_lock | 56 Make sure the architecture doesn't use slab allocator for page table 57 allocation: slab uses page->slab_cache and page->first_page for its pages.
|
D | numa | 137 may revert to its own fallback path. The slab kernel memory allocator is an
|
/linux-4.1.27/net/dccp/ |
D | ccid.c | 84 struct kmem_cache *slab; in ccid_kmem_cache_create() local 91 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, in ccid_kmem_cache_create() 93 return slab; in ccid_kmem_cache_create() 96 static void ccid_kmem_cache_destroy(struct kmem_cache *slab) in ccid_kmem_cache_destroy() argument 98 if (slab != NULL) in ccid_kmem_cache_destroy() 99 kmem_cache_destroy(slab); in ccid_kmem_cache_destroy()
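The ccid.c fragment shows the canonical cache lifecycle: create the cache once at init, and guard the destroy against NULL, as ccid_kmem_cache_destroy() does. A minimal self-contained sketch of the same pattern; all names here are illustrative, not part of ccid.c:

	#include <linux/module.h>
	#include <linux/slab.h>

	struct example_obj {
		int value;
	};

	static struct kmem_cache *example_cache;

	static int __init example_init(void)
	{
		example_cache = kmem_cache_create("example_cache",
						  sizeof(struct example_obj),
						  0, SLAB_HWCACHE_ALIGN, NULL);
		return example_cache ? 0 : -ENOMEM;
	}

	static void __exit example_exit(void)
	{
		if (example_cache)	/* mirror the NULL check in ccid_kmem_cache_destroy() */
			kmem_cache_destroy(example_cache);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");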
|
/linux-4.1.27/drivers/staging/i2o/ |
D | memory.c | 277 pool->slab = in i2o_pool_alloc() 279 if (!pool->slab) in i2o_pool_alloc() 282 pool->mempool = mempool_create_slab_pool(min_nr, pool->slab); in i2o_pool_alloc() 289 kmem_cache_destroy(pool->slab); in i2o_pool_alloc() 309 kmem_cache_destroy(pool->slab); in i2o_pool_free()
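i2o_pool_alloc() layers a mempool over the cache so that at least min_nr objects stay preallocated and allocation cannot fail on critical paths. A hedged sketch of that pattern with illustrative names, using the same cleanup order as the i2o code:

	#include <linux/mempool.h>
	#include <linux/slab.h>

	static int example_pool_alloc(struct kmem_cache **slab, mempool_t **pool,
				      size_t size, int min_nr)
	{
		*slab = kmem_cache_create("example_pool", size, 0, 0, NULL);
		if (!*slab)
			return -ENOMEM;

		*pool = mempool_create_slab_pool(min_nr, *slab);
		if (!*pool) {
			kmem_cache_destroy(*slab);	/* cleanup order as in i2o_pool_alloc() */
			return -ENOMEM;
		}
		return 0;
	}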
|
D | i2o_block.h | 67 struct kmem_cache *slab; member
|
D | i2o_block.c | 1154 i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0, in i2o_block_init() 1156 if (!i2o_blk_req_pool.slab) { in i2o_block_init() 1164 i2o_blk_req_pool.slab); in i2o_block_init() 1197 kmem_cache_destroy(i2o_blk_req_pool.slab); in i2o_block_init() 1219 kmem_cache_destroy(i2o_blk_req_pool.slab); in i2o_block_exit()
|
D | i2o.h | 494 struct kmem_cache *slab; member
|
/linux-4.1.27/block/ |
D | bio.c | 62 struct kmem_cache *slab; member 74 struct kmem_cache *slab = NULL; in bio_find_or_create_slab() local 85 if (!bslab->slab && entry == -1) in bio_find_or_create_slab() 88 slab = bslab->slab; in bio_find_or_create_slab() 95 if (slab) in bio_find_or_create_slab() 114 slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN, in bio_find_or_create_slab() 116 if (!slab) in bio_find_or_create_slab() 119 bslab->slab = slab; in bio_find_or_create_slab() 124 return slab; in bio_find_or_create_slab() 135 if (bs->bio_slab == bio_slabs[i].slab) { in bio_put_slab() [all …]
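bio_find_or_create_slab() keeps a table of size-keyed caches and reuses an existing one before creating another. A simplified sketch of that lookup; the real code also grows the table, reference-counts entries, and serializes under a mutex:

	#include <linux/slab.h>

	struct slab_entry {
		struct kmem_cache *slab;
		unsigned int size;
	};

	static struct kmem_cache *find_or_create(struct slab_entry *tbl, int n,
						 unsigned int sz, const char *name)
	{
		int i, hole = -1;

		for (i = 0; i < n; i++) {
			if (!tbl[i].slab) {
				if (hole < 0)
					hole = i;	/* remember the first free entry */
			} else if (tbl[i].size == sz) {
				return tbl[i].slab;	/* reuse an existing cache */
			}
		}
		if (hole < 0)
			return NULL;
		tbl[hole].slab = kmem_cache_create(name, sz, 0, 0, NULL);
		tbl[hole].size = sz;
		return tbl[hole].slab;
	}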
|
/linux-4.1.27/net/core/ |
D | sock.c | 1323 struct kmem_cache *slab; in sk_prot_alloc() local 1325 slab = prot->slab; in sk_prot_alloc() 1326 if (slab != NULL) { in sk_prot_alloc() 1327 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); in sk_prot_alloc() 1355 if (slab != NULL) in sk_prot_alloc() 1356 kmem_cache_free(slab, sk); in sk_prot_alloc() 1364 struct kmem_cache *slab; in sk_prot_free() local 1368 slab = prot->slab; in sk_prot_free() 1371 if (slab != NULL) in sk_prot_free() 1372 kmem_cache_free(slab, sk); in sk_prot_free() [all …]
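sk_prot_alloc() masks __GFP_ZERO out of the caller's gfp flags, presumably so the object can be cleared selectively: some fields must survive a free/realloc cycle for caches freed under RCU. A simplified sketch of that idea, with invented field names:

	#include <linux/slab.h>
	#include <linux/string.h>

	struct example_sock {
		unsigned long must_be_zeroed[4];
		unsigned long initialized_later[60];
	};

	static struct example_sock *example_alloc(struct kmem_cache *slab,
						  gfp_t priority)
	{
		/* strip __GFP_ZERO so the allocator does not clear the object... */
		struct example_sock *sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);

		/* ...then zero only the region that must start cleared */
		if (sk)
			memset(sk->must_be_zeroed, 0, sizeof(sk->must_be_zeroed));
		return sk;
	}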
|
/linux-4.1.27/arch/ia64/include/asm/sn/ |
D | geo.h | 30 slabid_t slab:4; /* slab (ASIC), 0 .. 15 within slot */ member 115 INVALID_SLAB : g.common.slab; in geo_slab()
|
/linux-4.1.27/include/net/ |
D | request_sock.h | 33 struct kmem_cache *slab; member 74 struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC); in reqsk_alloc() 107 kmem_cache_free(req->rsk_ops->slab, req); in reqsk_free()
|
D | sock.h | 1018 struct kmem_cache *slab; member
|
/linux-4.1.27/arch/unicore32/mm/ |
D | init.c | 63 int shared = 0, cached = 0, slab = 0, i; in show_mem() local 87 slab++; in show_mem() 99 printk(KERN_DEFAULT "%d slab pages\n", slab); in show_mem()
|
/linux-4.1.27/arch/ia64/sn/kernel/sn2/ |
D | sn_hwperf.c | 90 int *rack, int *bay, int *slot, int *slab) in sn_hwperf_location_to_bpos() argument 96 rack, &type, bay, slab) == 4) in sn_hwperf_location_to_bpos() 100 rack, &type, bay, slot, slab) != 5) in sn_hwperf_location_to_bpos() 111 int rack, bay, slot, slab; in sn_hwperf_geoid_to_cnode() local 114 if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab)) in sn_hwperf_geoid_to_cnode() 129 slot == this_slot && slab == this_slab) { in sn_hwperf_geoid_to_cnode()
|
/linux-4.1.27/Documentation/DocBook/ |
D | .kernel-api.xml.cmd | 2 …ib/crc-ccitt.c include/linux/idr.h lib/idr.c lib/idr.c include/linux/slab.h mm/slab.c mm/util.c ar…
|
/linux-4.1.27/Documentation/fault-injection/ |
D | fault-injection.txt | 12 injects slab allocation failures. (kmalloc(), kmem_cache_alloc(), ...) 169 o Inject slab allocation failures into module init/exit code 252 Run a command "make -C tools/testing/selftests/ run_tests" while injecting slab 264 Same as above, except that page allocation failures are injected instead of slab
|
/linux-4.1.27/Documentation/zh_CN/ |
D | magic-number.txt | 99 RED_MAGIC2 0x170fc2a5 (any) mm/slab.c 114 SLAB_C_MAGIC 0x4f17a36d kmem_cache mm/slab.c 122 RED_MAGIC1 0x5a2cf071 (any) mm/slab.c
|
/linux-4.1.27/tools/perf/Documentation/ |
D | perf-kmem.txt | 49 --slab::
|
/linux-4.1.27/Documentation/ |
D | magic-number.txt | 99 RED_MAGIC2 0x170fc2a5 (any) mm/slab.c 114 SLAB_C_MAGIC 0x4f17a36d kmem_cache mm/slab.c 122 RED_MAGIC1 0x5a2cf071 (any) mm/slab.c
|
D | kmemcheck.txt | 203 kmemcheck in such a way that the slab caches which are under SLUB debugging 211 slab cache, and with kmemcheck tracking all the other caches. This is advanced 486 a - unallocated (memory has been allocated by the slab layer, but has not 488 f - freed (memory has been allocated by the slab layer, but has been freed 745 If a slab cache is set up using the SLAB_NOTRACK flag, it will never return 748 If a slab cache is NOT set up using the SLAB_NOTRACK flag, callers can still
|
D | SubmitChecklist | 81 21: Has been checked with injection of at least slab and page-allocation
|
D | nommu-mmap.txt | 189 of the mapping exceeds the size of the slab object currently occupied by the 190 memory to which the mapping refers, or if a smaller slab object could be used.
|
D | BUG-HUNTING | 238 with this look at mm/slab.c and search for POISON_INUSE. When using this an
|
D | kernel-per-CPU-kthreads.txt | 179 CONFIG_SLAB=y, thus avoiding the slab allocator's periodic
|
D | kernel-parameters.txt | 2312 caches in the slab allocator. Saves per-node memory, 3391 culprit if slab objects become corrupted. Enabling 3404 The minimum number of objects per slab. SLUB will 3405 increase the slab order up to slub_max_order to 3406 generate a sufficiently large slab able to contain
|
/linux-4.1.27/Documentation/ja_JP/ |
D | SubmitChecklist | 97 21: Has been checked with injection of at least slab and page-allocation
|
/linux-4.1.27/drivers/gpu/drm/i915/ |
D | i915_dma.c | 1009 if (dev_priv->slab) in i915_driver_load() 1010 kmem_cache_destroy(dev_priv->slab); in i915_driver_load() 1094 if (dev_priv->slab) in i915_driver_unload() 1095 kmem_cache_destroy(dev_priv->slab); in i915_driver_unload()
|
D | i915_gem.c | 381 return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL); in i915_gem_object_alloc() 387 kmem_cache_free(dev_priv->slab, obj); in i915_gem_object_free() 4955 dev_priv->slab = in i915_gem_load()
|
D | i915_drv.h | 1566 struct kmem_cache *slab; member
|
/linux-4.1.27/mm/ |
D | memory-failure.c | 820 #define slab (1UL << PG_slab) macro 840 { slab, slab, MSG_SLAB, me_kernel }, 877 #undef slab
|
D | Makefile | 52 obj-$(CONFIG_SLAB) += slab.o
|
D | slab.c | 270 #define MAKE_LIST(cachep, listp, slab, nodeid) \ argument 273 list_splice(&get_node(cachep, nodeid)->slab, listp); \
|
D | Kconfig | 591 zsmalloc is a slab-based memory allocator designed to store
|
/linux-4.1.27/Documentation/trace/ |
D | events-kmem.txt | 22 justified, particularly if kmalloc slab pages are getting significantly 36 of writing, no information is available on what slab is being allocated from,
|
/linux-4.1.27/include/linux/ |
D | mm_types.h | 134 struct slab *slab_page; /* slab fields */
|
D | page-flags.h | 218 __PAGEFLAG(Slab, slab)
|
D | bio.h | 681 struct kmem_cache *slab; member
|
/linux-4.1.27/tools/testing/fault-injection/ |
D | failcmd.sh | 58 inject slab allocation failures
|
/linux-4.1.27/drivers/scsi/ |
D | scsi_lib.c | 47 struct kmem_cache *slab; member 2282 sgp->slab = kmem_cache_create(sgp->name, size, 0, in scsi_init_queue() 2284 if (!sgp->slab) { in scsi_init_queue() 2291 sgp->slab); in scsi_init_queue() 2306 if (sgp->slab) in scsi_init_queue() 2307 kmem_cache_destroy(sgp->slab); in scsi_init_queue() 2323 kmem_cache_destroy(sgp->slab); in scsi_exit_queue()
|
/linux-4.1.27/Documentation/sysctl/ |
D | vm.txt | 190 reclaimable slab objects like dentries and inodes. Once dropped, their 195 To free reclaimable slab objects (includes dentries and inodes): 197 To free slab objects and pagecache: 446 than this percentage of pages in a zone are reclaimable slab pages. 447 This ensures that the slab growth stays under control even in NUMA 452 Note that slab reclaim is triggered in a per zone / node fashion. 453 The process of reclaiming slab memory is currently not node specific
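The drop described above can be triggered from C as well as from a shell. A minimal sketch, equivalent to `echo 2 > /proc/sys/vm/drop_caches` (writing "1" drops pagecache, "2" drops reclaimable slab objects such as dentries and inodes, "3" drops both); requires root, and running sync first makes more objects clean and therefore droppable:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/vm/drop_caches", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("2\n", f);	/* 2 = reclaimable slab objects only */
		fclose(f);
		return 0;
	}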
|
/linux-4.1.27/fs/befs/ |
D | ChangeLog | 159 * Made inode allocations use a slab cache 170 * Fixed a couple of compile warnings due to use of malloc.h, when slab.h 408 Needed to include <linux/slab.h> in most files
|
/linux-4.1.27/fs/nfsd/ |
D | state.h | 593 struct kmem_cache *slab);
|
D | nfs4state.c | 557 struct kmem_cache *slab) in nfs4_alloc_stid() argument 562 stid = kmem_cache_zalloc(slab, GFP_KERNEL); in nfs4_alloc_stid() 590 kmem_cache_free(slab, stid); in nfs4_alloc_stid() 3316 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4… in alloc_stateowner() argument 3320 sop = kmem_cache_alloc(slab, GFP_KERNEL); in alloc_stateowner() 3326 kmem_cache_free(slab, sop); in alloc_stateowner()
|
/linux-4.1.27/Documentation/cgroups/ |
D | memory.txt | 117 3. Kernel user memory accounting and slab control 303 to trigger slab reclaim when those limits are reached. 311 * slab pages: pages allocated by the SLAB or SLUB allocator are tracked. A copy 314 skipped while the cache is being created. All objects in a slab page should
|
D | cpusets.txt | 179 - cpuset.memory_spread_slab flag: if set, spread slab cache evenly on allowed nodes 316 then the kernel will spread some file system related slab caches, 333 or slab caches to ignore the task's NUMA mempolicy and be spread 355 PFA_SPREAD_SLAB, and appropriately marked slab caches will allocate
|
/linux-4.1.27/net/ipv6/ |
D | af_inet6.c | 170 WARN_ON(!answer_prot->slab); in inet6_create()
|
/linux-4.1.27/init/ |
D | Kconfig | 1668 SLUB sysfs support. /sys/slab will not exist and there will be 1687 This option allows you to select a slab allocator. 1692 The regular slab allocator that is established and known to work 1699 SLUB is a slab allocator that minimizes cache line usage 1704 a slab allocator.
|
/linux-4.1.27/net/ipv4/ |
D | af_inet.c | 322 WARN_ON(!answer_prot->slab); in inet_create()
|
/linux-4.1.27/Documentation/filesystems/ |
D | proc.txt | 734 The slabinfo file gives information about memory usage at the slab level. 735 Linux uses slab pools for memory management above page level in version 2.2. 736 Commonly used objects have their own slab pool (such as network buffers, 853 slab will be reclaimable, due to items being in use. The
|
/linux-4.1.27/lib/ |
D | Kconfig.debug | 447 bool "Debug slab memory allocations" 505 of finding leaks due to the slab objects poisoning.
|
/linux-4.1.27/Documentation/networking/ |
D | packet_mmap.txt | 310 a pool of pre-determined sizes. This pool of memory is maintained by the slab
|
/linux-4.1.27/ |
D | CREDITS | 14 D: SLOB slab allocator 3417 D: slab, pipe, select.
|
/linux-4.1.27/Documentation/block/ |
D | biodoc.txt | 607 case of bio, these routines make use of the standard slab allocator.
|