/linux-4.4.14/drivers/md/persistent-data/
D | dm-array.c | 25 __le32 max_entries; member 47 bh_le->csum = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries, in array_block_prepare_for_write() 66 csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries, in array_block_check() 153 uint32_t max_entries, in alloc_ablock() argument 163 (*ab)->max_entries = cpu_to_le32(max_entries); in alloc_ablock() 182 BUG_ON(new_nr > le32_to_cpu(ab->max_entries)); in fill_ablock() 206 BUG_ON(new_nr > le32_to_cpu(ab->max_entries)); in trim_ablock() 338 uint32_t max_entries, in insert_new_ablock() argument 346 r = alloc_ablock(info, size_of_block, max_entries, &block, &ab); in insert_new_ablock() 359 unsigned max_entries, const void *value, in insert_full_ablocks() argument [all …]
|
D | dm-btree-remove.c | 72 BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries)); in node_shift() 90 BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries)); in node_copy() 98 BUG_ON(shift > le32_to_cpu(right->header.max_entries)); in node_copy() 133 return le32_to_cpu(n->header.max_entries) / 3; in merge_threshold() 177 uint32_t max_entries = le32_to_cpu(left->header.max_entries); in shift() local 178 uint32_t r_max_entries = le32_to_cpu(right->header.max_entries); in shift() 180 BUG_ON(max_entries != r_max_entries); in shift() 181 BUG_ON(nr_left - count > max_entries); in shift() 182 BUG_ON(nr_right + count > max_entries); in shift() 268 uint32_t max_entries = le32_to_cpu(left->header.max_entries); in delete_center_node() local [all …]
|
D | dm-btree.c | 93 index >= le32_to_cpu(node->header.max_entries)) { in insert_at() 132 uint32_t max_entries; in dm_btree_empty() local 139 max_entries = calc_max_entries(info->value_type.size, block_size); in dm_btree_empty() 145 n->header.max_entries = cpu_to_le32(max_entries); in dm_btree_empty() 546 rn->header.max_entries = ln->header.max_entries; in btree_split_sibling() 638 ln->header.max_entries = pn->header.max_entries; in btree_split_beneath() 643 rn->header.max_entries = pn->header.max_entries; in btree_split_beneath() 658 pn->header.max_entries = cpu_to_le32( in btree_split_beneath() 723 if (node->header.nr_entries == node->header.max_entries) { in btree_insert_raw()
|
D | dm-btree-internal.h | 34 __le32 max_entries; member 115 return &n->keys[le32_to_cpu(n->header.max_entries)]; in value_base()
|
D | dm-space-map-common.c | 243 if (nr_indexes > ll->max_entries(ll)) { in sm_ll_extend() 608 ll->max_entries = metadata_ll_max_entries; in sm_ll_new_metadata() 644 ll->max_entries = metadata_ll_max_entries; in sm_ll_open_metadata() 704 ll->max_entries = disk_ll_max_entries; in sm_ll_new_disk() 740 ll->max_entries = disk_ll_max_entries; in sm_ll_open_disk()
|
D | dm-btree-spine.c | 65 (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) { in node_check() 70 if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) { in node_check()
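The node_check() hits above reduce to two sanity checks on a btree node header: all keys and values must fit in one metadata block, and nr_entries may never exceed max_entries. A standalone, illustrative sketch of just those checks (the struct name, function name and error convention are made up; the on-disk header also carries further fields and uses little-endian values converted with le32_to_cpu):

#include <stdint.h>
#include <stddef.h>

/* Only the fields exercised by the checks above. */
struct btree_node_header_sketch {
	uint32_t nr_entries;   /* entries currently in use */
	uint32_t max_entries;  /* capacity of this node    */
	uint32_t value_size;   /* bytes per stored value   */
};

/* Return 0 if the header is plausible for a metadata block of
 * block_size bytes, -1 otherwise. */
static int node_check_sketch(const struct btree_node_header_sketch *h,
			     size_t block_size)
{
	/* each entry is a 64-bit key plus a value_size-byte value,
	 * and max_entries of them must fit in a single block */
	if ((sizeof(uint64_t) + h->value_size) * h->max_entries > block_size)
		return -1;

	/* a node may never claim more live entries than it has room for */
	if (h->nr_entries > h->max_entries)
		return -1;

	return 0;
}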
|
D | dm-space-map-common.h | 79 max_index_entries_fn max_entries; member
|
D | dm-block-manager.c | 82 t->max_entries = MAX_STACK; in __add_holder() 114 t.max_entries = MAX_STACK; in __check_holder()
|
/linux-4.4.14/drivers/acpi/ |
D | tables.c | 235 unsigned int max_entries) in acpi_parse_entries_array() argument 265 if (max_entries && count >= max_entries) in acpi_parse_entries_array() 294 if (max_entries && count > max_entries) { in acpi_parse_entries_array() 296 id, proc->id, count - max_entries, count); in acpi_parse_entries_array() 307 int entry_id, unsigned int max_entries) in acpi_parse_entries() argument 315 &proc, 1, max_entries); in acpi_parse_entries() 322 unsigned int max_entries) in acpi_table_parse_entries_array() argument 345 proc, proc_num, max_entries); in acpi_table_parse_entries_array() 356 unsigned int max_entries) in acpi_table_parse_entries() argument 364 max_entries); in acpi_table_parse_entries() [all …]
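The acpi_parse_entries_array() hits show how max_entries caps subtable parsing: once count reaches a non-zero max_entries the handler stops being invoked, and the parser then warns how many entries were skipped (the count - max_entries message). A hedged caller sketch, assuming the 4.4-era prototype whose tail is visible in the include/linux/acpi.h hits below (table signature, header size, entry id, handler, max_entries); the handler body and the choice of MADT/Local APIC are illustrative:

#include <linux/acpi.h>

/* Invoked once per matching subtable entry, at most max_entries times. */
static int parse_lapic_sketch(struct acpi_subtable_header *header,
			      const unsigned long end)
{
	/* inspect the entry here */
	return 0;
}

static void parse_madt_sketch(void)
{
	acpi_table_parse_entries(ACPI_SIG_MADT,
				 sizeof(struct acpi_table_madt),
				 ACPI_MADT_TYPE_LOCAL_APIC,
				 parse_lapic_sketch,
				 0 /* max_entries: 0 means no limit */);
}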
|
D | numa.c | 311 acpi_tbl_entry_handler handler, unsigned int max_entries) in acpi_table_parse_srat() argument 315 handler, max_entries); in acpi_table_parse_srat()
|
/linux-4.4.14/kernel/bpf/ |
D | arraymap.c | 27 if (attr->max_entries == 0 || attr->key_size != 4 || in array_map_alloc() 41 attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size) in array_map_alloc() 44 array_size = sizeof(*array) + attr->max_entries * elem_size; in array_map_alloc() 57 array->map.max_entries = attr->max_entries; in array_map_alloc() 70 if (index >= array->map.max_entries) in array_map_lookup_elem() 83 if (index >= array->map.max_entries) { in array_map_get_next_key() 88 if (index == array->map.max_entries - 1) in array_map_get_next_key() 106 if (index >= array->map.max_entries) in array_map_update_elem() 176 for (i = 0; i < array->map.max_entries; i++) in fd_array_map_free() 197 if (index >= array->map.max_entries) in fd_array_map_update_elem() [all …]
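The arraymap.c hits show the two jobs max_entries does for a BPF array map: it sizes the single allocation that backs the map (with an overflow guard on the multiplication), and it bounds every index on lookup and update. A userspace model of that logic (struct and function names are illustrative, not the kernel's):

#include <stdint.h>
#include <stdlib.h>

struct array_map_sketch {
	uint32_t max_entries;
	uint32_t value_size;
	char value[];          /* max_entries * value_size bytes */
};

static struct array_map_sketch *array_map_alloc_sketch(uint32_t max_entries,
							uint32_t value_size)
{
	struct array_map_sketch *m;

	if (max_entries == 0 || value_size == 0)
		return NULL;                    /* mirrors the attr sanity checks */
	if (max_entries > (SIZE_MAX - sizeof(*m)) / value_size)
		return NULL;                    /* overflow guard, like the U32_MAX check above */

	m = calloc(1, sizeof(*m) + (size_t)max_entries * value_size);
	if (!m)
		return NULL;
	m->max_entries = max_entries;
	m->value_size = value_size;
	return m;
}

static void *array_map_lookup_sketch(struct array_map_sketch *m, uint32_t index)
{
	if (index >= m->max_entries)            /* same bound the kernel enforces */
		return NULL;
	return m->value + (size_t)index * m->value_size;
}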
|
D | hashtab.c | 47 htab->map.max_entries = attr->max_entries; in htab_map_alloc() 53 if (htab->map.max_entries == 0 || htab->map.key_size == 0 || in htab_map_alloc() 58 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc() 86 (u64) htab->elem_size * htab->map.max_entries >= in htab_map_alloc() 92 htab->elem_size * htab->map.max_entries, in htab_map_alloc() 259 if (!l_old && unlikely(htab->count >= map->max_entries)) { in htab_map_update_elem()
|
D | syscall.c | 134 #define BPF_MAP_CREATE_LAST_FIELD max_entries
|
D | core.c | 451 if (unlikely(index >= array->map.max_entries)) in __bpf_prog_run()
|
/linux-4.4.14/arch/x86/kernel/ |
D | stacktrace.c | 31 if (trace->nr_entries < trace->max_entries) in __save_stack_address() 64 if (trace->nr_entries < trace->max_entries) in save_stack_trace() 72 if (trace->nr_entries < trace->max_entries) in save_stack_trace_regs() 79 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk() 113 if (trace->nr_entries < trace->max_entries) in __save_stack_trace_user() 116 while (trace->nr_entries < trace->max_entries) { in __save_stack_trace_user() 143 if (trace->nr_entries < trace->max_entries) in save_stack_trace_user()
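Every save_stack_trace variant here (and in the other arch/*/kernel/stacktrace.c entries below) applies the same guard: an address is recorded only while nr_entries is still below max_entries, so max_entries is a hard cap on how much of the caller's buffer gets filled. A standalone, illustrative version of that pattern:

struct stack_trace_sketch {
	unsigned int nr_entries, max_entries;
	unsigned long *entries;
};

static void save_address_sketch(struct stack_trace_sketch *trace,
				unsigned long addr)
{
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
	/* frames beyond max_entries are silently dropped */
}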
|
/linux-4.4.14/fs/ext4/ |
D | migrate.c | 124 unsigned long max_entries = inode->i_sb->s_blocksize >> 2; in update_ind_extent_range() local 131 for (i = 0; i < max_entries; i++) { in update_ind_extent_range() 153 unsigned long max_entries = inode->i_sb->s_blocksize >> 2; in update_dind_extent_range() local 160 for (i = 0; i < max_entries; i++) { in update_dind_extent_range() 168 lb->curr_block += max_entries; in update_dind_extent_range() 183 unsigned long max_entries = inode->i_sb->s_blocksize >> 2; in update_tind_extent_range() local 190 for (i = 0; i < max_entries; i++) { in update_tind_extent_range() 198 lb->curr_block += max_entries * max_entries; in update_tind_extent_range() 232 unsigned long max_entries = inode->i_sb->s_blocksize >> 2; in free_dind_blocks() local 239 for (i = 0; i < max_entries; i++) { in free_dind_blocks() [all …]
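The repeated max_entries = inode->i_sb->s_blocksize >> 2 in migrate.c is just "entries per indirect block": each entry is a 4-byte (32-bit) block number, so a block holds blocksize / 4 of them. A one-liner spelling that out (function name is illustrative):

/* e.g. a 4 KiB block gives 4096 >> 2 == 1024 entries, and a
 * double-indirect block therefore covers 1024 * 1024 data blocks,
 * which is why the triple-indirect walk above advances curr_block
 * by max_entries * max_entries. */
static inline unsigned long ind_entries_per_block(unsigned long blocksize)
{
	return blocksize >> 2;
}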
|
/linux-4.4.14/arch/sh/kernel/ |
D | stacktrace.c | 40 if (trace->nr_entries < trace->max_entries) in save_stack_address() 54 if (trace->nr_entries < trace->max_entries) in save_stack_trace() 75 if (trace->nr_entries < trace->max_entries) in save_stack_address_nosched() 89 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.4.14/tools/perf/util/ |
D | cpumap.c | 51 int max_entries = 0; in cpu_map__read() local 64 if (new_max >= max_entries) { in cpu_map__read() 65 max_entries = new_max + MAX_NR_CPUS / 2; in cpu_map__read() 66 tmp = realloc(tmp_cpus, max_entries * sizeof(int)); in cpu_map__read() 75 if (nr_cpus == max_entries) { in cpu_map__read() 76 max_entries += MAX_NR_CPUS; in cpu_map__read() 77 tmp = realloc(tmp_cpus, max_entries * sizeof(int)); in cpu_map__read() 122 int max_entries = 0; in cpu_map__new() local 157 if (nr_cpus == max_entries) { in cpu_map__new() 158 max_entries += MAX_NR_CPUS; in cpu_map__new() [all …]
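In cpumap.c, max_entries is plain capacity bookkeeping for a heap-allocated array of CPU numbers, grown in fixed steps with realloc() when the next CPU would not fit. The shape of that pattern, standalone (GROW_STEP stands in for MAX_NR_CPUS; names are illustrative):

#include <stdlib.h>

#define GROW_STEP 256

static int append_cpu(int **cpus, int *nr_cpus, int *max_entries, int cpu)
{
	if (*nr_cpus == *max_entries) {
		int new_max = *max_entries + GROW_STEP;
		int *tmp = realloc(*cpus, new_max * sizeof(int));

		if (!tmp)
			return -1;      /* old buffer and capacity stay valid */
		*cpus = tmp;
		*max_entries = new_max;
	}
	(*cpus)[(*nr_cpus)++] = cpu;
	return 0;
}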
|
/linux-4.4.14/arch/arm/kernel/ |
D | stacktrace.c | 86 if (trace->nr_entries >= trace->max_entries) in save_trace() 105 return trace->nr_entries >= trace->max_entries; in save_trace() 127 if (trace->nr_entries < trace->max_entries) in __save_stack_trace() 146 if (trace->nr_entries < trace->max_entries) in __save_stack_trace() 165 if (trace->nr_entries < trace->max_entries) in save_stack_trace_regs()
|
/linux-4.4.14/arch/parisc/kernel/ |
D | stacktrace.c | 36 while (trace->nr_entries < trace->max_entries) { in dump_trace() 52 if (trace->nr_entries < trace->max_entries) in save_stack_trace() 60 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.4.14/tools/lib/bpf/ |
D | bpf.c | 44 int value_size, int max_entries) in bpf_create_map() argument 53 attr.max_entries = max_entries; in bpf_create_map()
|
D | bpf.h | 14 int max_entries);
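From userspace, max_entries is the last argument to bpf_create_map() and becomes the fixed capacity of the new map. A hedged usage sketch, assuming the 4.4-era prototype whose tail is visible above, with the leading arguments being the map type and key size; the map type and element sizes chosen here are illustrative:

#include <linux/bpf.h>
#include "bpf.h"

int create_counter_map_sketch(void)
{
	return bpf_create_map(BPF_MAP_TYPE_HASH,
			      sizeof(int),          /* key_size    */
			      sizeof(long long),    /* value_size  */
			      1024);                /* max_entries */
}

Insertions beyond that capacity are refused by the kernel, as the htab_map_update_elem() hit in kernel/bpf/hashtab.c above shows.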
|
D | libbpf.h | 101 unsigned int max_entries; member
|
D | libbpf.c | 701 def.max_entries); in bpf_object__create_maps()
|
/linux-4.4.14/arch/mips/kernel/ |
D | stacktrace.c | 28 if (trace->nr_entries >= trace->max_entries) in save_raw_context_stack() 56 if (trace->nr_entries >= trace->max_entries) in save_context_stack() 80 WARN_ON(trace->nr_entries || !trace->max_entries); in save_stack_trace_tsk()
|
/linux-4.4.14/tools/lib/lockdep/uinclude/linux/ |
D | stacktrace.h | 7 unsigned int nr_entries, max_entries; member 19 backtrace((void **)(trace)->entries, (trace)->max_entries))
|
/linux-4.4.14/arch/s390/kernel/ |
D | stacktrace.c | 34 if (trace->nr_entries >= trace->max_entries) in save_context_stack() 56 if (trace->nr_entries >= trace->max_entries) in save_context_stack() 93 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.4.14/tools/perf/tests/ |
D | bpf-script-example.c | 23 unsigned int max_entries; member 31 .max_entries = 1,
|
/linux-4.4.14/samples/bpf/ |
D | libbpf.c | 22 int max_entries) in bpf_create_map() argument 28 .max_entries = max_entries in bpf_create_map()
|
D | tracex3_kern.c | 17 .max_entries = 4096, 48 .max_entries = SLOTS,
|
D | lathist_kern.c | 25 .max_entries = MAX_CPU, 68 .max_entries = MAX_CPU * MAX_ENTRIES,
|
D | tracex2_kern.c | 17 .max_entries = 1024, 76 .max_entries = 1024,
|
D | sockex3_kern.c | 25 .max_entries = 8, 98 .max_entries = 32, 119 .max_entries = 1024,
|
D | tracex6_kern.c | 10 .max_entries = 32,
|
D | sockex1_kern.c | 11 .max_entries = 256,
|
D | trace_output_kern.c | 10 .max_entries = 2,
|
D | tracex4_kern.c | 21 .max_entries = 1000000,
|
D | tracex5_kern.c | 19 .max_entries = 1024,
|
D | bpf_helpers.h | 61 unsigned int max_entries; member
|
D | libbpf.h | 8 int max_entries);
|
D | sockex2_kern.c | 195 .max_entries = 1024,
|
D | bpf_load.c | 160 maps[i].max_entries); in load_maps()
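On the BPF-program side of the samples, max_entries appears in the struct bpf_map_def that bpf_helpers.h declares and that bpf_load.c feeds to map creation (the load_maps() hit above). A minimal map definition in that style; the map name and element sizes are illustrative:

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") example_map = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(long),
	.max_entries = 64,     /* fixed capacity chosen at load time */
};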
|
/linux-4.4.14/arch/cris/kernel/ |
D | stacktrace.c | 46 return trace->nr_entries >= trace->max_entries; in save_trace() 66 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.4.14/arch/metag/kernel/ |
D | stacktrace.c | 139 return trace->nr_entries >= trace->max_entries; in save_trace() 157 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk() 178 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.4.14/arch/um/kernel/ |
D | stacktrace.c | 53 if (trace->nr_entries >= trace->max_entries) in save_addr() 66 if (trace->nr_entries < trace->max_entries) in __save_stack_trace()
|
/linux-4.4.14/arch/sparc/kernel/ |
D | stacktrace.c | 63 trace->max_entries) in __save_stack_trace() 70 } while (trace->nr_entries < trace->max_entries); in __save_stack_trace()
|
/linux-4.4.14/arch/arm64/kernel/ |
D | stacktrace.c | 93 return trace->nr_entries >= trace->max_entries; in save_trace() 117 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.4.14/arch/unicore32/kernel/ |
D | stacktrace.c | 94 return trace->nr_entries >= trace->max_entries; in save_trace() 122 if (trace->nr_entries < trace->max_entries) in save_stack_trace_tsk()
|
/linux-4.4.14/arch/arc/kernel/ |
D | stacktrace.c | 182 if (trace->nr_entries >= trace->max_entries) in __collect_all() 200 if (trace->nr_entries >= trace->max_entries) in __collect_all_but_sched()
|
/linux-4.4.14/arch/x86/mm/kmemcheck/ |
D | error.c | 186 e->trace.max_entries = ARRAY_SIZE(e->trace_entries); in kmemcheck_error_save() 222 e->trace.max_entries = ARRAY_SIZE(e->trace_entries); in kmemcheck_error_save_bug()
|
/linux-4.4.14/arch/ia64/kernel/ |
D | stacktrace.c | 25 if (trace->nr_entries == trace->max_entries) in ia64_do_save_stack()
|
/linux-4.4.14/arch/avr32/kernel/ |
D | stacktrace.c | 43 if (trace->nr_entries >= trace->max_entries) in save_stack_trace()
|
/linux-4.4.14/arch/hexagon/kernel/ |
D | stacktrace.c | 54 if (trace->nr_entries >= trace->max_entries) in save_stack_trace()
|
/linux-4.4.14/arch/blackfin/kernel/ |
D | stacktrace.c | 41 if (trace->nr_entries >= trace->max_entries) in save_stack_trace()
|
/linux-4.4.14/include/linux/ |
D | stacktrace.h | 11 unsigned int nr_entries, max_entries; member
|
D | acpi.h | 153 int entry_id, unsigned int max_entries); 157 unsigned int max_entries); 161 unsigned int max_entries); 164 unsigned int max_entries); 167 unsigned int max_entries);
|
D | bpf.h | 38 u32 max_entries; member
|
/linux-4.4.14/kernel/ |
D | backtracetest.c | 58 trace.max_entries = ARRAY_SIZE(entries); in backtrace_test_saved()
|
D | latencytop.c | 151 trace.max_entries = LT_BACKTRACEDEPTH; in store_stacktrace()
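backtracetest.c and latencytop.c show the caller's half of the stacktrace contract: max_entries tells save_stack_trace() how big the entries[] buffer is, and nr_entries reports how many slots were actually used. A hedged in-kernel sketch of that setup (buffer depth and skip value are arbitrary):

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void dump_current_stack_sketch(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries  = 0,
		.max_entries = ARRAY_SIZE(entries),
		.entries     = entries,
		.skip        = 0,       /* keep all leading frames */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}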
|
/linux-4.4.14/arch/powerpc/kernel/ |
D | stacktrace.c | 42 if (trace->nr_entries >= trace->max_entries) in save_context_stack()
|
/linux-4.4.14/fs/xfs/ |
D | xfs_acl.c | 42 int max_entries) in xfs_acl_from_disk() argument 52 if (count > max_entries || XFS_ACL_SIZE(count) != len) in xfs_acl_from_disk()
|
/linux-4.4.14/kernel/trace/ |
D | bpf_trace.c | 195 if (unlikely(index >= array->map.max_entries)) in bpf_perf_event_read() 236 if (unlikely(index >= array->map.max_entries)) in bpf_perf_event_output()
|
D | trace_stack.c | 29 .max_entries = STACK_TRACE_ENTRIES - 1,
|
D | trace.c | 1831 trace.max_entries = FTRACE_STACK_MAX_ENTRIES; in __ftrace_trace_stack() 1858 trace.max_entries = FTRACE_STACK_ENTRIES; in __ftrace_trace_stack() 1958 trace.max_entries = FTRACE_STACK_ENTRIES; in ftrace_trace_userstack()
|
/linux-4.4.14/arch/tile/kernel/ |
D | stack.c | 497 if (i >= trace->max_entries || in save_stack_trace_common() 504 if (i < trace->max_entries) in save_stack_trace_common() 532 else if (trace->nr_entries < trace->max_entries) in save_stack_trace_user()
|
/linux-4.4.14/include/uapi/linux/ |
D | bpf.h | 106 __u32 max_entries; /* max number of entries in a map */ member
|
/linux-4.4.14/drivers/s390/scsi/ |
D | zfcp_fc.c | 653 struct zfcp_adapter *adapter, int max_entries) in zfcp_fc_eval_gpn_ft() argument 682 for (x = 1; x < max_entries && !last; x++) { in zfcp_fc_eval_gpn_ft() 731 int chain, max_entries, buf_num, max_bytes; in zfcp_fc_scan_ports() local 737 max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE; in zfcp_fc_scan_ports() 754 ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries); in zfcp_fc_scan_ports()
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | init.c | 209 u32 rcvtids, max_entries; in hfi1_create_ctxtdata() local 272 max_entries = rcd->rcv_array_groups * in hfi1_create_ctxtdata() 274 rcvtids = ((max_entries * hfi1_rcvarr_split) / 100); in hfi1_create_ctxtdata() 1551 u32 max_entries, egrtop, alloced_bytes = 0, idx = 0; in hfi1_setup_eagerbufs() local 1679 max_entries = rcd->rcv_array_groups * dd->rcv_entries.group_size; in hfi1_setup_eagerbufs() 1681 rcd->expected_count = max_entries - egrtop; in hfi1_setup_eagerbufs()
|
D | verbs.h | 1027 u32 max_entries);
|
/linux-4.4.14/arch/microblaze/kernel/ |
D | unwind.c | 244 if (trace->nr_entries >= trace->max_entries) in microblaze_unwind_inner()
|
/linux-4.4.14/arch/xtensa/kernel/ |
D | stacktrace.c | 233 return trace->nr_entries >= trace->max_entries; in stack_trace_cb()
|
/linux-4.4.14/lib/ |
D | fault-inject.c | 79 trace.max_entries = depth; in fail_stacktrace()
|
D | dma-debug.c | 671 entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; in dma_entry_alloc()
|
/linux-4.4.14/mm/ |
D | page_owner.c | 65 .max_entries = ARRAY_SIZE(page_ext->trace_entries), in __set_page_owner()
|
D | kmemleak.c | 525 stack_trace.max_entries = MAX_TRACE; in __save_stack_trace()
|
D | slub.c | 524 trace.max_entries = TRACK_ADDRS_COUNT; in set_track()
|
/linux-4.4.14/drivers/net/wireless/p54/ |
D | eeprom.c | 85 size_t max_entries; member 252 if ((i < 0) && (list->entries < list->max_entries)) { in p54_update_channel_param() 354 list->max_entries = max_channel_num; in p54_generate_channel_lists()
|
/linux-4.4.14/drivers/gpu/drm/i915/ |
D | i915_gem_gtt.c | 2437 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; in gen8_ggtt_clear_range() local 2440 if (WARN(num_entries > max_entries, in gen8_ggtt_clear_range() 2442 first_entry, num_entries, max_entries)) in gen8_ggtt_clear_range() 2443 num_entries = max_entries; in gen8_ggtt_clear_range() 2463 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; in gen6_ggtt_clear_range() local 2466 if (WARN(num_entries > max_entries, in gen6_ggtt_clear_range() 2468 first_entry, num_entries, max_entries)) in gen6_ggtt_clear_range() 2469 num_entries = max_entries; in gen6_ggtt_clear_range()
|
/linux-4.4.14/drivers/net/ethernet/neterion/vxge/ |
D | vxge-config.c | 3820 u32 max_entries; in vxge_hw_vpath_rts_rth_itable_set() local 3829 max_entries = (((u32)1) << itable_size); in vxge_hw_vpath_rts_rth_itable_set() 3837 for (j = 0; j < max_entries; j++) { in vxge_hw_vpath_rts_rth_itable_set() 3852 for (j = 0; j < max_entries; j++) { in vxge_hw_vpath_rts_rth_itable_set() 3874 for (j = 0; j < max_entries;) { in vxge_hw_vpath_rts_rth_itable_set() 3879 while (j < max_entries) { in vxge_hw_vpath_rts_rth_itable_set() 3890 while (j < max_entries) { in vxge_hw_vpath_rts_rth_itable_set() 3901 while (j < max_entries) { in vxge_hw_vpath_rts_rth_itable_set() 3912 while (j < max_entries) { in vxge_hw_vpath_rts_rth_itable_set()
|
/linux-4.4.14/include/linux/ceph/ |
D | ceph_fs.h | 362 __le32 max_entries; /* how many dentries to grab */ member
|
/linux-4.4.14/kernel/locking/ |
D | lockdep.c | 395 trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; in save_trace() 413 trace->max_entries = trace->nr_entries; in save_trace() 445 .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
|
/linux-4.4.14/arch/x86/net/ |
D | bpf_jit_comp.c | 270 offsetof(struct bpf_array, map.max_entries)); in emit_bpf_tail_call()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_verbs.h | 1047 u32 max_entries);
|
/linux-4.4.14/arch/s390/net/ |
D | bpf_jit_comp.c | 1012 offsetof(struct bpf_array, map.max_entries)); in bpf_jit_insn()
|
/linux-4.4.14/drivers/video/fbdev/omap2/omapfb/ |
D | omapfb-main.c | 1477 static int omapfb_parse_vram_param(const char *param, int max_entries, in omapfb_parse_vram_param() argument 1498 if (fbnum >= max_entries) in omapfb_parse_vram_param()
|
/linux-4.4.14/fs/proc/ |
D | base.c | 477 trace.max_entries = MAX_STACK_TRACE_DEPTH; in proc_pid_stack()
|
/linux-4.4.14/Documentation/networking/ |
D | filter.txt | 1106 using attr->map_type, attr->key_size, attr->value_size, attr->max_entries
|
/linux-4.4.14/fs/ceph/ |
D | mds_client.c | 1695 req->r_args.readdir.max_entries = cpu_to_le32(num_entries); in ceph_alloc_readdir_reply_buffer()
|