/linux-4.1.27/drivers/md/persistent-data/ |
D | dm-array.c |
     25  __le32 max_entries;    member
     47  bh_le->csum = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,    in array_block_prepare_for_write()
     66  csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,    in array_block_check()
    153  uint32_t max_entries,    in alloc_ablock() argument
    163  (*ab)->max_entries = cpu_to_le32(max_entries);    in alloc_ablock()
    182  BUG_ON(new_nr > le32_to_cpu(ab->max_entries));    in fill_ablock()
    206  BUG_ON(new_nr > le32_to_cpu(ab->max_entries));    in trim_ablock()
    338  uint32_t max_entries,    in insert_new_ablock() argument
    346  r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);    in insert_new_ablock()
    359  unsigned max_entries, const void *value,    in insert_full_ablocks() argument
    [all …]
|
D | dm-btree-remove.c |
     72  BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));    in node_shift()
     90  BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));    in node_copy()
     98  BUG_ON(shift > le32_to_cpu(right->header.max_entries));    in node_copy()
    133  return le32_to_cpu(n->header.max_entries) / 3;    in merge_threshold()
    177  uint32_t max_entries = le32_to_cpu(left->header.max_entries);    in shift() local
    178  uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);    in shift()
    180  BUG_ON(max_entries != r_max_entries);    in shift()
    181  BUG_ON(nr_left - count > max_entries);    in shift()
    182  BUG_ON(nr_right + count > max_entries);    in shift()
    271  uint32_t max_entries = le32_to_cpu(left->header.max_entries);    in delete_center_node() local
    [all …]
|
D | dm-btree.c |
     88  index >= le32_to_cpu(node->header.max_entries)) {    in insert_at()
    127  uint32_t max_entries;    in dm_btree_empty() local
    134  max_entries = calc_max_entries(info->value_type.size, block_size);    in dm_btree_empty()
    140  n->header.max_entries = cpu_to_le32(max_entries);    in dm_btree_empty()
    449  rn->header.max_entries = ln->header.max_entries;    in btree_split_sibling()
    539  ln->header.max_entries = pn->header.max_entries;    in btree_split_beneath()
    544  rn->header.max_entries = pn->header.max_entries;    in btree_split_beneath()
    559  pn->header.max_entries = cpu_to_le32(    in btree_split_beneath()
    624  if (node->header.nr_entries == node->header.max_entries) {    in btree_insert_raw()
|
D | dm-btree-internal.h | 34 __le32 max_entries; member 115 return &n->keys[le32_to_cpu(n->header.max_entries)]; in value_base()
|
D | dm-space-map-common.c |
    243  if (nr_indexes > ll->max_entries(ll)) {    in sm_ll_extend()
    608  ll->max_entries = metadata_ll_max_entries;    in sm_ll_new_metadata()
    644  ll->max_entries = metadata_ll_max_entries;    in sm_ll_open_metadata()
    704  ll->max_entries = disk_ll_max_entries;    in sm_ll_new_disk()
    740  ll->max_entries = disk_ll_max_entries;    in sm_ll_open_disk()
|
D | dm-btree-spine.c | 65 (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) { in node_check() 70 if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) { in node_check()
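The node_check() hit above bounds max_entries by the metadata block size: each entry costs a 64-bit key plus value_size bytes, and all entries (plus the node header) must fit in one block. Below is a minimal sketch of that sizing rule; the helper name and the explicit header_size parameter are illustrative and this is not the kernel's calc_max_entries().

#include <stdint.h>
#include <stddef.h>

/*
 * Illustrative upper bound on max_entries for a btree node, consistent
 * with the node_check() test above: (sizeof(__le64) + value_size) per
 * entry must not exceed the block once the node header is paid for.
 */
static uint32_t max_entries_bound(size_t value_size, size_t block_size,
				  size_t header_size)
{
	size_t elt_size = sizeof(uint64_t) + value_size;	/* key + value */

	if (block_size <= header_size)
		return 0;

	return (uint32_t)((block_size - header_size) / elt_size);
}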
|
D | dm-space-map-common.h | 79 max_index_entries_fn max_entries; member
|
D | dm-block-manager.c | 82 t->max_entries = MAX_STACK; in __add_holder() 114 t.max_entries = MAX_STACK; in __check_holder()
|
/linux-4.1.27/kernel/bpf/ |
D | arraymap.c |
     31  if (attr->max_entries == 0 || attr->key_size != 4 ||    in array_map_alloc()
     39  attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size)    in array_map_alloc()
     42  array_size = sizeof(*array) + attr->max_entries * elem_size;    in array_map_alloc()
     55  array->map.max_entries = attr->max_entries;    in array_map_alloc()
     68  if (index >= array->map.max_entries)    in array_map_lookup_elem()
     81  if (index >= array->map.max_entries) {    in array_map_get_next_key()
     86  if (index == array->map.max_entries - 1)    in array_map_get_next_key()
    104  if (index >= array->map.max_entries)    in array_map_update_elem()
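The array_map_alloc() hits show the shape of the sanity checks: a fixed 4-byte key, a non-zero max_entries, and an overflow guard before sizing the backing array. A hedged user-space restatement of that logic follows, with a generic header_size standing in for sizeof(*array); it is not the kernel's exact code.

#include <stdint.h>
#include <errno.h>

/* Sketch of the validation implied above; element-size rounding omitted. */
static int array_map_size(uint32_t key_size, uint32_t value_size,
			  uint32_t max_entries, uint32_t header_size,
			  uint32_t *array_size)
{
	uint32_t elem_size = value_size;

	if (max_entries == 0 || key_size != 4 || value_size == 0)
		return -EINVAL;

	/* guard the multiplication before performing it */
	if (max_entries > (UINT32_MAX - header_size) / elem_size)
		return -ENOMEM;

	*array_size = header_size + max_entries * elem_size;
	return 0;
}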
|
D | hashtab.c |
     47  htab->map.max_entries = attr->max_entries;    in htab_map_alloc()
     53  if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||    in htab_map_alloc()
     58  htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);    in htab_map_alloc()
    239  if (!l_old && unlikely(htab->count >= map->max_entries)) {    in htab_map_update_elem()
|
D | syscall.c | 87 #define BPF_MAP_CREATE_LAST_FIELD max_entries
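BPF_MAP_CREATE_LAST_FIELD being max_entries means max_entries is the last attribute the 4.1 map-creation command accepts. A user-space sketch in the style of samples/bpf/libbpf.c, assuming __NR_bpf is exposed by the system headers:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Create an array map; returns a map fd or -1 with errno set. */
static int create_array_map(int value_size, int max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));		/* unused fields must be zero */
	attr.map_type = BPF_MAP_TYPE_ARRAY;
	attr.key_size = 4;			/* array maps require u32 keys */
	attr.value_size = value_size;
	attr.max_entries = max_entries;		/* last field checked above */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}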
|
/linux-4.1.27/arch/x86/kernel/ |
D | stacktrace.c |
     31  if (trace->nr_entries < trace->max_entries)    in __save_stack_address()
     64  if (trace->nr_entries < trace->max_entries)    in save_stack_trace()
     72  if (trace->nr_entries < trace->max_entries)    in save_stack_trace_regs()
     79  if (trace->nr_entries < trace->max_entries)    in save_stack_trace_tsk()
    113  if (trace->nr_entries < trace->max_entries)    in __save_stack_trace_user()
    116  while (trace->nr_entries < trace->max_entries) {    in __save_stack_trace_user()
    143  if (trace->nr_entries < trace->max_entries)    in save_stack_trace_user()
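Every hit in this file (and in the other arch stacktrace.c files below) is the same guard: stop appending once nr_entries reaches max_entries, which the caller sets to the capacity of the entries buffer. A kernel-side sketch of the usual calling pattern, modelled on kernel/backtracetest.c further down:

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void dump_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),	/* capacity, not count */
		.entries	= entries,
		.skip		= 0,			/* keep the top frames */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}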
|
/linux-4.1.27/drivers/acpi/ |
D | tables.c |
    221  int entry_id, unsigned int max_entries)    in acpi_parse_entries() argument
    251  && (!max_entries || count < max_entries)) {    in acpi_parse_entries()
    271  if (max_entries && count > max_entries) {    in acpi_parse_entries()
    273  id, entry_id, count - max_entries, count);    in acpi_parse_entries()
    284  unsigned int max_entries)    in acpi_table_parse_entries() argument
    307  entry_id, max_entries);    in acpi_table_parse_entries()
    315  acpi_tbl_entry_handler handler, unsigned int max_entries)    in acpi_table_parse_madt() argument
    319  handler, max_entries);    in acpi_table_parse_madt()
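acpi_parse_entries() treats max_entries == 0 as "no limit" and otherwise stops after max_entries matching subtables, warning about the surplus (the count - max_entries hit above). A hedged caller sketch; the handler name and the 256 cap are illustrative, and the handler prototype is assumed to match the 4.1 acpi_tbl_entry_handler typedef.

#include <linux/acpi.h>
#include <linux/init.h>

/* Called once per ACPI_MADT_TYPE_LOCAL_APIC subtable. */
static int __init handle_lapic(struct acpi_subtable_header *header,
			       const unsigned long end)
{
	/* inspect one subtable here */
	return 0;
}

static void __init scan_lapics(void)
{
	/* parse at most 256 local APIC entries from the MADT */
	acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, handle_lapic, 256);
}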
|
D | numa.c | 269 acpi_tbl_entry_handler handler, unsigned int max_entries) in acpi_table_parse_srat() argument 273 handler, max_entries); in acpi_table_parse_srat()
|
/linux-4.1.27/fs/ext4/ |
D | migrate.c |
    124  unsigned long max_entries = inode->i_sb->s_blocksize >> 2;    in update_ind_extent_range() local
    131  for (i = 0; i < max_entries; i++) {    in update_ind_extent_range()
    153  unsigned long max_entries = inode->i_sb->s_blocksize >> 2;    in update_dind_extent_range() local
    160  for (i = 0; i < max_entries; i++) {    in update_dind_extent_range()
    168  lb->curr_block += max_entries;    in update_dind_extent_range()
    183  unsigned long max_entries = inode->i_sb->s_blocksize >> 2;    in update_tind_extent_range() local
    190  for (i = 0; i < max_entries; i++) {    in update_tind_extent_range()
    198  lb->curr_block += max_entries * max_entries;    in update_tind_extent_range()
    232  unsigned long max_entries = inode->i_sb->s_blocksize >> 2;    in free_dind_blocks() local
    239  for (i = 0; i < max_entries; i++) {    in free_dind_blocks()
    [all …]
|
/linux-4.1.27/tools/perf/util/ |
D | cpumap.c |
     48  int max_entries = 0;    in cpu_map__read() local
     61  if (new_max >= max_entries) {    in cpu_map__read()
     62  max_entries = new_max + MAX_NR_CPUS / 2;    in cpu_map__read()
     63  tmp = realloc(tmp_cpus, max_entries * sizeof(int));    in cpu_map__read()
     72  if (nr_cpus == max_entries) {    in cpu_map__read()
     73  max_entries += MAX_NR_CPUS;    in cpu_map__read()
     74  tmp = realloc(tmp_cpus, max_entries * sizeof(int));    in cpu_map__read()
    119  int max_entries = 0;    in cpu_map__new() local
    154  if (nr_cpus == max_entries) {    in cpu_map__new()
    155  max_entries += MAX_NR_CPUS;    in cpu_map__new()
    [all …]
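Here max_entries is a capacity rather than a limit: when nr_cpus catches up with it, the array is grown by another MAX_NR_CPUS slots and realloc'd. A standalone restatement of that pattern (CHUNK stands in for MAX_NR_CPUS):

#include <stdlib.h>

#define CHUNK 256	/* stands in for MAX_NR_CPUS */

static int append_cpu(int **cpus, int *nr_cpus, int *max_entries, int cpu)
{
	if (*nr_cpus == *max_entries) {
		int new_max = *max_entries + CHUNK;
		int *tmp = realloc(*cpus, new_max * sizeof(int));

		if (!tmp)
			return -1;	/* old buffer stays valid */
		*cpus = tmp;
		*max_entries = new_max;
	}

	(*cpus)[(*nr_cpus)++] = cpu;
	return 0;
}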
|
/linux-4.1.27/arch/sh/kernel/ |
D | stacktrace.c |
     40  if (trace->nr_entries < trace->max_entries)    in save_stack_address()
     54  if (trace->nr_entries < trace->max_entries)    in save_stack_trace()
     75  if (trace->nr_entries < trace->max_entries)    in save_stack_address_nosched()
     89  if (trace->nr_entries < trace->max_entries)    in save_stack_trace_tsk()
|
/linux-4.1.27/arch/arm/kernel/ |
D | stacktrace.c |
     86  if (trace->nr_entries >= trace->max_entries)    in save_trace()
    105  return trace->nr_entries >= trace->max_entries;    in save_trace()
    127  if (trace->nr_entries < trace->max_entries)    in __save_stack_trace()
    146  if (trace->nr_entries < trace->max_entries)    in __save_stack_trace()
    165  if (trace->nr_entries < trace->max_entries)    in save_stack_trace_regs()
|
/linux-4.1.27/arch/parisc/kernel/ |
D | stacktrace.c | 36 while (trace->nr_entries < trace->max_entries) { in dump_trace() 52 if (trace->nr_entries < trace->max_entries) in save_stack_trace() 60 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.1.27/arch/mips/kernel/ |
D | stacktrace.c | 27 if (trace->nr_entries >= trace->max_entries) in save_raw_context_stack() 54 if (trace->nr_entries >= trace->max_entries) in save_context_stack() 77 WARN_ON(trace->nr_entries || !trace->max_entries); in save_stack_trace_tsk()
|
/linux-4.1.27/tools/lib/lockdep/uinclude/linux/ |
D | stacktrace.h | 7 unsigned int nr_entries, max_entries; member 19 backtrace((void **)(trace)->entries, (trace)->max_entries))
|
/linux-4.1.27/arch/s390/kernel/ |
D | stacktrace.c | 34 if (trace->nr_entries >= trace->max_entries) in save_context_stack() 56 if (trace->nr_entries >= trace->max_entries) in save_context_stack() 93 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.1.27/samples/bpf/ |
D | libbpf.c | 22 int max_entries) in bpf_create_map() argument 28 .max_entries = max_entries in bpf_create_map()
|
D | tracex2_kern.c | 17 .max_entries = 1024, 69 .max_entries = 64,
|
D | tracex3_kern.c | 17 .max_entries = 4096, 48 .max_entries = SLOTS,
|
D | sockex1_kern.c | 11 .max_entries = 256,
|
D | tracex4_kern.c | 21 .max_entries = 1000000,
|
D | bpf_helpers.h | 43 unsigned int max_entries; member
|
D | libbpf.h | 8 int max_entries);
|
D | sockex2_kern.c | 195 .max_entries = 1024,
|
D | bpf_load.c | 130 maps[i].max_entries); in load_maps()
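The samples tie these pieces together: a _kern.c object declares its maps through bpf_helpers.h's struct bpf_map_def (whose max_entries member shows above), and bpf_load.c forwards that value to bpf_create_map() at load time. A fragment in the style of tracex2_kern.c; the map name and key/value types here are illustrative.

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

/* Hash map with room for 1024 entries; inserts beyond that fail. */
struct bpf_map_def SEC("maps") my_hist_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(long),
	.value_size = sizeof(long),
	.max_entries = 1024,
};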
|
/linux-4.1.27/arch/metag/kernel/ |
D | stacktrace.c | 139 return trace->nr_entries >= trace->max_entries; in save_trace() 157 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk() 178 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.1.27/arch/um/kernel/ |
D | stacktrace.c | 53 if (trace->nr_entries >= trace->max_entries) in save_addr() 66 if (trace->nr_entries < trace->max_entries) in __save_stack_trace()
|
/linux-4.1.27/arch/sparc/kernel/ |
D | stacktrace.c | 63 trace->max_entries) in __save_stack_trace() 70 } while (trace->nr_entries < trace->max_entries); in __save_stack_trace()
|
/linux-4.1.27/arch/arm64/kernel/ |
D | stacktrace.c | 93 return trace->nr_entries >= trace->max_entries; in save_trace() 117 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.1.27/arch/unicore32/kernel/ |
D | stacktrace.c | 94 return trace->nr_entries >= trace->max_entries; in save_trace() 122 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.1.27/arch/arc/kernel/ |
D | stacktrace.c | 184 if (trace->nr_entries >= trace->max_entries) in __collect_all() 202 if (trace->nr_entries >= trace->max_entries) in __collect_all_but_sched()
|
/linux-4.1.27/arch/x86/mm/kmemcheck/ |
D | error.c | 186 e->trace.max_entries = ARRAY_SIZE(e->trace_entries); in kmemcheck_error_save() 222 e->trace.max_entries = ARRAY_SIZE(e->trace_entries); in kmemcheck_error_save_bug()
|
/linux-4.1.27/arch/ia64/kernel/ |
D | stacktrace.c | 25 if (trace->nr_entries == trace->max_entries) in ia64_do_save_stack()
|
/linux-4.1.27/arch/avr32/kernel/ |
D | stacktrace.c | 43 if (trace->nr_entries >= trace->max_entries) in save_stack_trace()
|
/linux-4.1.27/arch/hexagon/kernel/ |
D | stacktrace.c | 54 if (trace->nr_entries >= trace->max_entries) in save_stack_trace()
|
/linux-4.1.27/arch/blackfin/kernel/ |
D | stacktrace.c | 41 if (trace->nr_entries >= trace->max_entries) in save_stack_trace()
|
/linux-4.1.27/include/linux/ |
D | stacktrace.h | 11 unsigned int nr_entries, max_entries; member
|
D | bpf.h | 34 u32 max_entries; member
|
D | acpi.h | 138 int entry_id, unsigned int max_entries); 142 unsigned int max_entries); 145 unsigned int max_entries);
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | stacktrace.c | 42 if (trace->nr_entries >= trace->max_entries) in save_context_stack()
|
/linux-4.1.27/kernel/ |
D | backtracetest.c | 58 trace.max_entries = ARRAY_SIZE(entries); in backtrace_test_saved()
|
D | latencytop.c | 151 trace.max_entries = LT_BACKTRACEDEPTH; in store_stacktrace()
|
/linux-4.1.27/fs/xfs/ |
D | xfs_acl.c | 41 int max_entries) in xfs_acl_from_disk() argument 49 if (count > max_entries) in xfs_acl_from_disk()
|
/linux-4.1.27/include/uapi/linux/ |
D | bpf.h | 138 __u32 max_entries; /* max number of entries in a map */ member
|
/linux-4.1.27/arch/xtensa/kernel/ |
D | stacktrace.c | 68 return trace->nr_entries >= trace->max_entries; in stack_trace_cb()
|
/linux-4.1.27/drivers/s390/scsi/ |
D | zfcp_fc.c |
    653  struct zfcp_adapter *adapter, int max_entries)    in zfcp_fc_eval_gpn_ft() argument
    682  for (x = 1; x < max_entries && !last; x++) {    in zfcp_fc_eval_gpn_ft()
    731  int chain, max_entries, buf_num, max_bytes;    in zfcp_fc_scan_ports() local
    737  max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;    in zfcp_fc_scan_ports()
    754  ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);    in zfcp_fc_scan_ports()
|
/linux-4.1.27/arch/microblaze/kernel/ |
D | unwind.c | 244 if (trace->nr_entries >= trace->max_entries) in microblaze_unwind_inner()
|
/linux-4.1.27/drivers/gpu/drm/i915/ |
D | i915_gem_gtt.c |
    1834  const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;    in gen8_ggtt_clear_range() local
    1837  if (WARN(num_entries > max_entries,    in gen8_ggtt_clear_range()
    1839  first_entry, num_entries, max_entries))    in gen8_ggtt_clear_range()
    1840  num_entries = max_entries;    in gen8_ggtt_clear_range()
    1860  const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;    in gen6_ggtt_clear_range() local
    1863  if (WARN(num_entries > max_entries,    in gen6_ggtt_clear_range()
    1865  first_entry, num_entries, max_entries))    in gen6_ggtt_clear_range()
    1866  num_entries = max_entries;    in gen6_ggtt_clear_range()
|
/linux-4.1.27/lib/ |
D | fault-inject.c | 79 trace.max_entries = depth; in fail_stacktrace()
|
D | dma-debug.c | 671 entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; in dma_entry_alloc()
|
/linux-4.1.27/mm/ |
D | page_owner.c | 65 .max_entries = ARRAY_SIZE(page_ext->trace_entries), in __set_page_owner()
|
D | kmemleak.c | 507 stack_trace.max_entries = MAX_TRACE; in __save_stack_trace()
|
D | slub.c | 522 trace.max_entries = TRACK_ADDRS_COUNT; in set_track()
|
/linux-4.1.27/drivers/net/wireless/p54/ |
D | eeprom.c | 85 size_t max_entries; member 252 if ((i < 0) && (list->entries < list->max_entries)) { in p54_update_channel_param() 354 list->max_entries = max_channel_num; in p54_generate_channel_lists()
|
/linux-4.1.27/kernel/trace/ |
D | trace_stack.c | 37 .max_entries = STACK_TRACE_ENTRIES - 1,
|
D | trace.c | 1827 trace.max_entries = FTRACE_STACK_MAX_ENTRIES; in __ftrace_trace_stack() 1854 trace.max_entries = FTRACE_STACK_ENTRIES; in __ftrace_trace_stack() 1961 trace.max_entries = FTRACE_STACK_ENTRIES; in ftrace_trace_userstack()
|
/linux-4.1.27/drivers/net/ethernet/neterion/vxge/ |
D | vxge-config.c |
    3820  u32 max_entries;    in vxge_hw_vpath_rts_rth_itable_set() local
    3829  max_entries = (((u32)1) << itable_size);    in vxge_hw_vpath_rts_rth_itable_set()
    3837  for (j = 0; j < max_entries; j++) {    in vxge_hw_vpath_rts_rth_itable_set()
    3852  for (j = 0; j < max_entries; j++) {    in vxge_hw_vpath_rts_rth_itable_set()
    3874  for (j = 0; j < max_entries;) {    in vxge_hw_vpath_rts_rth_itable_set()
    3879  while (j < max_entries) {    in vxge_hw_vpath_rts_rth_itable_set()
    3890  while (j < max_entries) {    in vxge_hw_vpath_rts_rth_itable_set()
    3901  while (j < max_entries) {    in vxge_hw_vpath_rts_rth_itable_set()
    3912  while (j < max_entries) {    in vxge_hw_vpath_rts_rth_itable_set()
|
/linux-4.1.27/arch/tile/kernel/ |
D | stack.c | 496 if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET) in save_stack_trace_tsk()
|
/linux-4.1.27/include/linux/ceph/ |
D | ceph_fs.h | 362 __le32 max_entries; /* how many dentries to grab */ member
|
/linux-4.1.27/kernel/locking/ |
D | lockdep.c | 395 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; in save_trace() 413 trace->max_entries = trace->nr_entries; in save_trace() 445 .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
|
/linux-4.1.27/drivers/video/fbdev/omap2/omapfb/ |
D | omapfb-main.c | 1477 static int omapfb_parse_vram_param(const char *param, int max_entries, in omapfb_parse_vram_param() argument 1498 if (fbnum >= max_entries) in omapfb_parse_vram_param()
|
/linux-4.1.27/fs/proc/ |
D | base.c | 285 trace.max_entries = MAX_STACK_TRACE_DEPTH; in proc_pid_stack()
|
/linux-4.1.27/Documentation/networking/ |
D | filter.txt | 1106 using attr->map_type, attr->key_size, attr->value_size, attr->max_entries
|
/linux-4.1.27/fs/ceph/ |
D | mds_client.c | 1652 req->r_args.readdir.max_entries = cpu_to_le32(num_entries); in ceph_alloc_readdir_reply_buffer()
|