Searched refs:entries (Results 1 - 200 of 3253) sorted by relevance

/linux-4.4.14/tools/build/feature/
test-backtrace.c
7 size_t entries; main() local
9 entries = backtrace(backtrace_fns, 10); main()
10 backtrace_symbols_fd(backtrace_fns, entries, 1); main()
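For context, this feature test is essentially the following self-contained program; a minimal sketch of the glibc <execinfo.h> API it probes: backtrace() fills an array with return addresses, and backtrace_symbols_fd() writes the symbolized frames to a file descriptor.

    #include <execinfo.h>   /* glibc: backtrace(), backtrace_symbols_fd() */

    int main(void)
    {
            void *backtrace_fns[10];
            size_t entries;

            entries = backtrace(backtrace_fns, 10);          /* capture up to 10 frames */
            backtrace_symbols_fd(backtrace_fns, entries, 1); /* symbolize to fd 1 (stdout) */
            return 0;
    }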
/linux-4.4.14/tools/perf/tests/
fdarray.c
12 fda->entries[fd].fd = fda->nr - fd; fdarray__init_revents()
13 fda->entries[fd].revents = revents; fdarray__init_revents()
55 fda->entries[2].revents = POLLIN; test__fdarray__filter()
56 expected_fd[0] = fda->entries[2].fd; test__fdarray__filter()
58 pr_debug("\nfiltering all but fda->entries[2]:"); test__fdarray__filter()
67 if (fda->entries[0].fd != expected_fd[0]) { test__fdarray__filter()
68 pr_debug("\nfda->entries[0].fd=%d != %d\n", test__fdarray__filter()
69 fda->entries[0].fd, expected_fd[0]); test__fdarray__filter()
74 fda->entries[0].revents = POLLIN; test__fdarray__filter()
75 expected_fd[0] = fda->entries[0].fd; test__fdarray__filter()
76 fda->entries[3].revents = POLLIN; test__fdarray__filter()
77 expected_fd[1] = fda->entries[3].fd; test__fdarray__filter()
79 pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):"); test__fdarray__filter()
90 if (fda->entries[fd].fd != expected_fd[fd]) { test__fdarray__filter()
91 pr_debug("\nfda->entries[%d].fd=%d != %d\n", fd, test__fdarray__filter()
92 fda->entries[fd].fd, expected_fd[fd]); test__fdarray__filter()
117 if (fda->entries[_idx].fd != _fd) { \ test__fdarray__add()
118 pr_debug("\n%d: fda->entries[%d](%d) != %d!", \ test__fdarray__add()
119 __LINE__, _idx, fda->entries[1].fd, _fd); \ test__fdarray__add()
122 if (fda->entries[_idx].events != (_revents)) { \ test__fdarray__add()
123 pr_debug("\n%d: fda->entries[%d].revents(%d) != %d!", \ test__fdarray__add()
124 __LINE__, _idx, fda->entries[_idx].fd, _revents); \ test__fdarray__add()
148 if (fda->entries == NULL) { test__fdarray__add()
hists_filter.c
57 * so total 9 entries will be in the tree. add_hist_entries()
156 TEST_ASSERT_VAL("Invalid nr hist entries", evlist__for_each()
163 TEST_ASSERT_VAL("Unmatched nr hist entries", evlist__for_each()
181 TEST_ASSERT_VAL("Invalid nr hist entries", evlist__for_each()
189 TEST_ASSERT_VAL("Unmatched nr hist entries for thread filter", evlist__for_each()
210 TEST_ASSERT_VAL("Invalid nr hist entries", evlist__for_each()
218 TEST_ASSERT_VAL("Unmatched nr hist entries for dso filter", evlist__for_each()
245 TEST_ASSERT_VAL("Invalid nr hist entries", evlist__for_each()
253 TEST_ASSERT_VAL("Unmatched nr hist entries for symbol filter", evlist__for_each()
274 TEST_ASSERT_VAL("Invalid nr hist entries", evlist__for_each()
282 TEST_ASSERT_VAL("Unmatched nr hist entries for socket filter", evlist__for_each()
305 TEST_ASSERT_VAL("Invalid nr hist entries", evlist__for_each()
313 TEST_ASSERT_VAL("Unmatched nr hist entries for all filter", evlist__for_each()
hists_link.c
73 * "bash [libc] malloc" so total 9 entries will be in the tree. add_hist_entries()
158 * Only entries from fake_common_samples should have a pair. __validate_match()
186 pr_debug("Invalid count for matched entries: %zd of %zd\n", __validate_match()
208 * Leader hists (idx = 0) will have dummy entries from other, __validate_link()
209 * and some entries will have no pair. However every entry __validate_link()
247 pr_debug("Invalid count of dummy entries: %zd of %zd\n", __validate_link()
252 pr_debug("Invalid count of total leader entries: %zd of %zd\n", __validate_link()
258 pr_debug("Invalid count of total other entries: %zd of %zd\n", __validate_link()
263 pr_debug("Other hists should not have dummy entries: %zd\n", __validate_link()
329 /* match common entries */
335 /* link common and/or dummy entries */
/linux-4.4.14/tools/lib/lockdep/uinclude/linux/
stacktrace.h
8 unsigned long *entries; member in struct:stack_trace
14 backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1); print_stack_trace()
19 backtrace((void **)(trace)->entries, (trace)->max_entries))
/linux-4.4.14/tools/lib/api/fd/
array.c
15 fda->entries = NULL; fdarray__init()
27 struct pollfd *entries = realloc(fda->entries, size); fdarray__grow() local
29 if (entries == NULL) fdarray__grow()
34 free(entries); fdarray__grow()
39 fda->entries = entries; fdarray__grow()
62 free(fda->entries); fdarray__exit()
81 fda->entries[fda->nr].fd = fd; fdarray__add()
82 fda->entries[fda->nr].events = revents; fdarray__add()
96 if (fda->entries[fd].revents & revents) { fdarray__filter()
104 fda->entries[nr] = fda->entries[fd]; fdarray__filter()
116 return poll(fda->entries, fda->nr, timeout); fdarray__poll()
124 printed += fprintf(fp, "%s%d", fd ? ", " : "", fda->entries[fd].fd); fdarray__fprintf()
array.h
12 * not set it to anything, as it is kept in synch with @entries, being
22 struct pollfd *entries; member in struct:fdarray
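Taken together, the array.c hits describe a grow-on-demand array of struct pollfd. A condensed userspace sketch of the same pattern (the names fds, fds_add and fds_filter are hypothetical, not the perf API itself):

    #include <poll.h>
    #include <stdlib.h>

    struct fds {
            struct pollfd *entries;
            int nr, nr_alloc;
    };

    /* Append an fd, doubling the entries array on demand (cf. fdarray__grow/__add). */
    static int fds_add(struct fds *f, int fd, short events)
    {
            if (f->nr == f->nr_alloc) {
                    int alloc = f->nr_alloc ? f->nr_alloc * 2 : 4;
                    struct pollfd *e = realloc(f->entries, alloc * sizeof(*e));

                    if (e == NULL)
                            return -1;
                    f->entries = e;
                    f->nr_alloc = alloc;
            }
            f->entries[f->nr].fd = fd;
            f->entries[f->nr].events = events;
            return f->nr++;
    }

    /* Compact in place, dropping entries whose revents match, as the
     * fdarray__filter() hits above suggest. */
    static int fds_filter(struct fds *f, short revents)
    {
            int fd, nr = 0;

            for (fd = 0; fd < f->nr; ++fd) {
                    if (f->entries[fd].revents & revents)
                            continue;               /* filtered out */
                    if (fd != nr)
                            f->entries[nr] = f->entries[fd];
                    ++nr;
            }
            return f->nr = nr;
    }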
/linux-4.4.14/kernel/events/
callchain.c
39 struct callchain_cpus_entries *entries; release_callchain_buffers_rcu() local
42 entries = container_of(head, struct callchain_cpus_entries, rcu_head); release_callchain_buffers_rcu()
45 kfree(entries->cpu_entries[cpu]); release_callchain_buffers_rcu()
47 kfree(entries); release_callchain_buffers_rcu()
52 struct callchain_cpus_entries *entries; release_callchain_buffers() local
54 entries = callchain_cpus_entries; release_callchain_buffers()
56 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu); release_callchain_buffers()
63 struct callchain_cpus_entries *entries; alloc_callchain_buffers() local
72 entries = kzalloc(size, GFP_KERNEL); alloc_callchain_buffers()
73 if (!entries) alloc_callchain_buffers()
79 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL, for_each_possible_cpu()
81 if (!entries->cpu_entries[cpu]) for_each_possible_cpu()
85 rcu_assign_pointer(callchain_cpus_entries, entries);
91 kfree(entries->cpu_entries[cpu]);
92 kfree(entries);
138 struct callchain_cpus_entries *entries; get_callchain_entry() local
144 entries = rcu_dereference(callchain_cpus_entries); get_callchain_entry()
145 if (!entries) get_callchain_entry()
150 return &entries->cpu_entries[cpu][*rctx]; get_callchain_entry()
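The release path above is the standard RCU deferred-free idiom: unpublish the global pointer, then hand the old object to call_rcu(), whose callback recovers it via container_of() once all readers have drained. Condensed from the hits above:

    static void example_release_rcu(struct rcu_head *head)
    {
            struct callchain_cpus_entries *entries;
            int cpu;

            /* recover the enclosing object from its embedded rcu_head */
            entries = container_of(head, struct callchain_cpus_entries, rcu_head);
            for_each_possible_cpu(cpu)
                    kfree(entries->cpu_entries[cpu]);
            kfree(entries);
    }

    static void example_release(void)
    {
            struct callchain_cpus_entries *entries = callchain_cpus_entries;

            RCU_INIT_POINTER(callchain_cpus_entries, NULL);    /* unpublish */
            call_rcu(&entries->rcu_head, example_release_rcu); /* free after readers drain */
    }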
/linux-4.4.14/include/linux/
auxvec.h
6 #define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
llist.h
29 * The list entries deleted via llist_del_all can be traversed with
31 * entries cannot be traversed safely before being deleted from the list.
32 * The order of deleted entries is from the newest to the oldest added
91 * llist_for_each - iterate over some deleted entries of a lock-less list
93 * @node: the first entry of deleted list entries
95 * In general, some entries of the lock-less list can be traversed
99 * If being used on entries deleted from lock-less list directly, the
108 * llist_for_each_entry - iterate over some deleted entries of lock-less list of given type
110 * @node: the first entry of deleted list entries.
113 * In general, some entries of the lock-less list can be traversed
117 * If being used on entries deleted from lock-less list directly, the
128 * llist_for_each_entry_safe - iterate over some deleted entries of lock-less list of given type
132 * @node: the first entry of deleted list entries.
135 * In general, some entries of the lock-less list can be traversed
139 * If being used on entries deleted from lock-less list directly, the
184 * llist_del_all - delete all entries from lock-less list
185 * @head: the head of lock-less list to delete all entries
187 * If list is empty, return NULL, otherwise, delete all entries and
188 * return the pointer to the first entry. The order of entries
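A small usage sketch of this API (struct my_work is hypothetical): producers push lock-free with llist_add(), a single consumer drains the whole list at once with llist_del_all(), and, as the comments above state, the drained chain comes back newest-first:

    struct my_work {
            struct llist_node node;
            int payload;
    };

    static LLIST_HEAD(pending);

    static void producer(struct my_work *w)
    {
            llist_add(&w->node, &pending);  /* lockless, safe from IRQ/NMI */
    }

    static void consumer(void)
    {
            struct llist_node *first = llist_del_all(&pending);
            struct my_work *w, *tmp;

            /* the _safe variant is required because entries are freed while walking */
            llist_for_each_entry_safe(w, tmp, first, node)
                    kfree(w);
    }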
stacktrace.h
12 unsigned long *entries; member in struct:stack_trace
13 int skip; /* input argument: How many entries to skip */
dqblk_qtree.h
48 unsigned long long entries = epb; qtree_depth() local
51 for (i = 1; entries < (1ULL << 32); i++) qtree_depth()
52 entries *= epb; qtree_depth()
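To make the loop concrete: each tree block holds epb 4-byte references, so assuming the usual 1 KiB tree block, epb = 1024 / 4 = 256, and since 256^4 = 2^32 already covers the full 32-bit id space, qtree_depth() returns 4.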
nfsacl.h
14 /* Maximum number of ACL entries over NFS */
sysv_fs.h
26 #define XENIX_NICINOD 100 /* number of inode cache entries */
27 #define XENIX_NICFREE 100 /* number of free block list chunk entries */
62 #define SYSV_NICINOD 100 /* number of inode cache entries */
63 #define SYSV_NICFREE 50 /* number of free block list chunk entries */
126 #define V7_NICINOD 100 /* number of inode cache entries */
127 #define V7_NICFREE 50 /* number of free block list chunk entries */
154 * that no reasonable file system would have that many entries in root
164 #define COH_NICINOD 100 /* number of inode cache entries */
165 #define COH_NICFREE 64 /* number of free block list chunk entries */
remoteproc.h
48 * @num: number of resource entries
50 * @offset: array of offsets pointing at the various resource entries
53 * by the remote processor. It may also include configuration entries.
57 * Some resource entries are mere announcements, where the host is informed
58 * of specific remoteproc configuration. Other entries require the host to
66 * future), the number of available resource entries, and their offsets
69 * Immediately following this header are the resource entries themselves,
94 * enum fw_resource_type - types of resource entries
135 * These request entries should precede other firmware resource entries,
136 * as other entries might request placing other data objects inside
137 * these memory regions (e.g. data/code segments, trace resource entries, ...).
140 * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
198 * Note: at this point we just "trust" those devmem entries to contain valid
227 * user via debugfs entries (called trace0, trace1, etc..).
275 * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
/linux-4.4.14/tools/perf/util/
pstack.c
16 void *entries[0]; member in struct:pstack
43 if (pstack->entries[i] == key) { pstack__remove()
45 memmove(pstack->entries + i, pstack__remove()
46 pstack->entries + i + 1, pstack__remove()
61 pstack->entries[pstack->top++] = key; pstack__push()
73 ret = pstack->entries[--pstack->top]; pstack__pop()
74 pstack->entries[pstack->top] = NULL; pstack__pop()
82 return pstack->entries[pstack->top - 1]; pstack__peek()
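The pstack hits describe a small LIFO of pointers from which an arbitrary key can also be deleted; removal is a memmove() that closes the gap. A minimal illustration (assumed shape, not the exact perf code):

    #include <string.h>

    struct pstack {
            unsigned short top, max_nr_entries;
            void *entries[];
    };

    static void pstack_remove(struct pstack *p, void *key)
    {
            unsigned short i;

            for (i = 0; i < p->top; i++) {
                    if (p->entries[i] == key) {
                            /* shift the tail down over the removed slot */
                            memmove(p->entries + i, p->entries + i + 1,
                                    (p->top - i - 1) * sizeof(void *));
                            p->top--;
                            return;
                    }
            }
    }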
xyarray.c
12 xy->entries = xlen * ylen; xyarray__new()
20 size_t n = xy->entries * xy->entry_size; xyarray__reset()
rblist.c
16 struct rb_node **p = &rblist->entries.rb_node; rblist__add_node()
38 rb_insert_color(new_node, &rblist->entries); rblist__add_node()
46 rb_erase(rb_node, &rblist->entries); rblist__remove_node()
55 struct rb_node **p = &rblist->entries.rb_node; __rblist__findnew()
76 rb_insert_color(new_node, &rblist->entries); __rblist__findnew()
97 rblist->entries = RB_ROOT; rblist__init()
107 struct rb_node *pos, *next = rb_first(&rblist->entries); rblist__delete()
122 for (node = rb_first(&rblist->entries); node; node = rb_next(node)) { rblist__entry()
xyarray.h
9 size_t entries; member in struct:xyarray
evlist.h
38 struct list_head entries; member in struct:perf_evlist
189 return list_entry(evlist->entries.next, struct perf_evsel, node); perf_evlist__first()
194 return list_entry(evlist->entries.prev, struct perf_evsel, node); perf_evlist__last()
239 __evlist__for_each(&(evlist)->entries, evsel)
255 __evlist__for_each_continue(&(evlist)->entries, evsel)
271 __evlist__for_each_reverse(&(evlist)->entries, evsel)
289 __evlist__for_each_safe(&(evlist)->entries, tmp, evsel)
/linux-4.4.14/arch/powerpc/perf/
bhrb.S
2 * Basic assembly code to read BHRB entries
19 * The maximum number of BHRB entries supported with PPC_MFBHRBE instruction
20 * is 1024. We have limited number of table entries here as POWER8 implements
21 * 32 BHRB entries.
/linux-4.4.14/arch/ia64/include/uapi/asm/
auxvec.h
11 #define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
/linux-4.4.14/drivers/misc/vmw_vmci/
vmci_handle_array.c
66 array->entries[array->size] = handle; vmci_handle_arr_append_entry()
80 if (vmci_handle_is_equal(array->entries[i], entry_handle)) { vmci_handle_arr_remove_entry()
81 handle = array->entries[i]; vmci_handle_arr_remove_entry()
83 array->entries[i] = array->entries[array->size]; vmci_handle_arr_remove_entry()
84 array->entries[array->size] = VMCI_INVALID_HANDLE; vmci_handle_arr_remove_entry()
101 handle = array->entries[array->size]; vmci_handle_arr_remove_tail()
102 array->entries[array->size] = VMCI_INVALID_HANDLE; vmci_handle_arr_remove_tail()
117 return array->entries[index]; vmci_handle_arr_get_entry()
126 if (vmci_handle_is_equal(array->entries[i], entry_handle)) vmci_handle_arr_has_entry()
139 return array->entries; vmci_handle_arr_get_handles()
vmci_resource.c
30 struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS]; member in struct:vmci_hash_table
53 &vmci_resource_table.entries[idx], node) { vmci_resource_lookup()
134 hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]); vmci_resource_add()
152 hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) { vmci_resource_remove()
/linux-4.4.14/arch/powerpc/include/uapi/asm/
auxvec.h
5 * We need to put in some extra aux table entries to tell glibc what
19 #define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
/linux-4.4.14/drivers/net/ethernet/cavium/thunder/
nicvf_queues.h
35 #define RBDR_SIZE0 0ULL /* 8K entries */
36 #define RBDR_SIZE1 1ULL /* 16K entries */
37 #define RBDR_SIZE2 2ULL /* 32K entries */
38 #define RBDR_SIZE3 3ULL /* 64K entries */
39 #define RBDR_SIZE4 4ULL /* 128K entries */
40 #define RBDR_SIZE5 5ULL /* 256K entries */
41 #define RBDR_SIZE6 6ULL /* 512K entries */
43 #define SND_QUEUE_SIZE0 0ULL /* 1K entries */
44 #define SND_QUEUE_SIZE1 1ULL /* 2K entries */
45 #define SND_QUEUE_SIZE2 2ULL /* 4K entries */
46 #define SND_QUEUE_SIZE3 3ULL /* 8K entries */
47 #define SND_QUEUE_SIZE4 4ULL /* 16K entries */
48 #define SND_QUEUE_SIZE5 5ULL /* 32K entries */
49 #define SND_QUEUE_SIZE6 6ULL /* 64K entries */
51 #define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
52 #define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
53 #define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
54 #define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
55 #define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
56 #define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
57 #define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
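Judging by the comments, the size codes encode power-of-two entry counts: receive-buffer rings start at 8K entries and send/completion queues at 1K, doubling per step. An assumed decoding, for illustration only (these macros are not in the driver):

    #define RBDR_RING_ENTRIES(code)  (8192ULL << (code))  /* 8K .. 512K */
    #define QUEUE_ENTRIES(code)      (1024ULL << (code))  /* 1K .. 64K  */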
/linux-4.4.14/lib/
test_rhashtable.c
31 static int entries = 50000; variable
32 module_param(entries, int, 0);
33 MODULE_PARM_DESC(entries, "Number of entries to add (default: 50000)");
83 for (i = 0; i < entries * 2; i++) { test_rht_lookup()
149 pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n", test_bucket_stats()
150 total, atomic_read(&ht->nelems), entries, chain_len); test_bucket_stats()
152 if (total != atomic_read(&ht->nelems) || total != entries) test_bucket_stats()
165 * Insert entries into table with all keys even numbers test_rhashtable()
167 pr_info(" Adding %d keys\n", entries); test_rhashtable()
169 for (i = 0; i < entries; i++) { test_rhashtable()
197 pr_info(" Deleting %d keys\n", entries); test_rhashtable()
198 for (i = 0; i < entries; i++) { test_rhashtable()
223 for (i = 0; i < entries; i++) { thread_lookup_test()
252 for (i = 0; i < entries; i++) { threadfunc()
277 for (i = 0; i < entries; i += step) { threadfunc()
311 entries = min(entries, MAX_ENTRIES); test_rht_init()
354 objs = vzalloc(tcount * entries * sizeof(struct test_obj)); test_rht_init()
370 tdata[i].objs = objs + i * entries; test_rht_init()
fault-inject.c
70 unsigned long entries[MAX_STACK_TRACE_DEPTH]; fail_stacktrace() local
78 trace.entries = entries; fail_stacktrace()
84 if (attr->reject_start <= entries[n] && fail_stacktrace()
85 entries[n] < attr->reject_end) fail_stacktrace()
87 if (attr->require_start <= entries[n] && fail_stacktrace()
88 entries[n] < attr->require_end) fail_stacktrace()
scatterlist.c
42 * sg_nents - return total count of entries in scatterlist
46 * Allows one to know how many entries are in sg, taking into account
60 * sg_nents_for_len - return total count of entries in scatterlist
66 * Determines the number of entries in sg that are required to meet
70 * the number of sg entries needed, negative error on failure
95 * @nents: Number of entries in the scatterlist
102 * the important bit is that @nents@ denotes the number of entries that
125 * @nents: Number of entries in table
195 * @max_ents: The maximum number of entries per single scatterlist
259 * @nents: Number of entries in sg list
260 * @max_ents: The maximum number of entries the allocator returns per call
336 * If no more entries after this one, mark the end __sg_alloc_table()
351 * @nents: Number of entries in sg list
476 * @nents: number of sg entries
639 * @nents: Number of SG entries
692 * @nents: Number of SG entries
709 * @nents: Number of SG entries
726 * @nents: Number of SG entries
744 * @nents: Number of SG entries
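The counting walk that sg_nents() documents is a plain chain traversal via sg_next(); in sketch form:

    static int count_sg_entries(struct scatterlist *sg)
    {
            int nents;

            for (nents = 0; sg; sg = sg_next(sg))
                    nents++;        /* sg_next() also follows chained tables */
            return nents;
    }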
/linux-4.4.14/arch/powerpc/mm/
mmu_context_iommu.c
28 u64 entries; /* number of entries in hpas[] */ member in struct:mm_iommu_table_group_mem_t
75 long mm_iommu_get(unsigned long ua, unsigned long entries, mm_iommu_get() argument
89 if ((mem->ua == ua) && (mem->entries == entries)) { mm_iommu_get()
96 if ((mem->ua < (ua + (entries << PAGE_SHIFT))) && mm_iommu_get()
98 (mem->entries << PAGE_SHIFT)))) { mm_iommu_get()
105 ret = mm_iommu_adjust_locked_vm(current->mm, entries, true); mm_iommu_get()
109 locked_entries = entries; mm_iommu_get()
117 mem->hpas = vzalloc(entries * sizeof(mem->hpas[0])); mm_iommu_get()
124 for (i = 0; i < entries; ++i) { mm_iommu_get()
142 mem->entries = entries; mm_iommu_get()
162 for (i = 0; i < mem->entries; ++i) { mm_iommu_unpin()
194 mm_iommu_adjust_locked_vm(current->mm, mem->entries, false); mm_iommu_release()
244 (mem->entries << PAGE_SHIFT))) { mm_iommu_lookup()
255 unsigned long entries) mm_iommu_find()
262 if ((mem->ua == ua) && (mem->entries == entries)) { mm_iommu_find()
278 if (entry >= mem->entries) mm_iommu_ua_to_hpa()
254 mm_iommu_find(unsigned long ua, unsigned long entries) mm_iommu_find() argument
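The two-sided comparison in mm_iommu_get() (source lines 96-98 above) is the standard half-open interval overlap test, isolated here for clarity (the lengths are entry counts scaled by PAGE_SHIFT):

    /* [a, a + la) and [b, b + lb) intersect iff each starts before the other ends. */
    static bool ranges_overlap(unsigned long a, unsigned long la,
                               unsigned long b, unsigned long lb)
    {
            return a < b + lb && b < a + la;
    }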
/linux-4.4.14/net/netfilter/
xt_repldata.h
4 * 'entries' and 'term' are never anywhere referenced by word in code. In fact,
11 * struct type##_standard entries[nhooks];
22 struct type##_standard entries[]; \
25 size_t term_offset = (offsetof(typeof(*tbl), entries[nhooks]) + \
42 tbl->entries[i++] = (struct type##_standard) \
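The layout trick the comment describes: with a flexible array member, the offset just past entries[nhooks] is exactly where the terminator record must be written. A standalone sketch (offsetof() with a variable index, as the kernel uses it, is a GNU extension; portable code adds the sizes by hand):

    #include <stddef.h>
    #include <stdio.h>

    struct entry { int hook; };

    struct repl {
            int nhooks;
            struct entry entries[];   /* nhooks real entries, then a terminator */
    };

    int main(void)
    {
            int nhooks = 3;
            size_t term_offset = offsetof(struct repl, entries) +
                                 nhooks * sizeof(struct entry);

            printf("terminator at byte offset %zu\n", term_offset);
            return 0;
    }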
/linux-4.4.14/include/video/
maxinefb.h
34 * The color palette entries have the form 0x00BBGGRR
36 #define IMS332_REG_COLOR_PALETTE 0x100 /* color palette, 256 entries */
38 /* 3 entries */
videomode.h
48 * @disp: structure with all possible timing entries
/linux-4.4.14/kernel/trace/
trace_stat.h
14 /* Iteration over statistic entries */
17 /* Compare two entries for stats sorting */
23 /* Print the headers of your stat entries */
/linux-4.4.14/tools/include/linux/
list.h
10 * list_del_range - deletes range of entries from list.
13 * Note: list_empty on the range of entries does not return true after this,
14 * the entries are in an undefined state.
/linux-4.4.14/fs/f2fs/
shrinker.c
57 /* count extent cache entries */ f2fs_shrink_count()
60 /* shrink clean nat cache entries */ f2fs_shrink_count()
63 /* count free nids cache entries */ f2fs_shrink_count()
103 /* shrink extent cache entries */ f2fs_shrink_scan()
106 /* shrink clean nat cache entries */ f2fs_shrink_scan()
110 /* shrink free nids cache entries */ f2fs_shrink_scan()
/linux-4.4.14/arch/x86/kernel/cpu/
intel.c
152 * the TLB when any changes are made to any of the page table entries. early_init_intel()
581 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries" },
611 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
612 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
613 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
614 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
617 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
618 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
619 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
620 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
621 if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
622 tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
623 if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
624 tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
625 if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
626 tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
627 if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
628 tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
631 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
632 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
633 if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
634 tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
635 if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
636 tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
639 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
640 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
643 if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
644 tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
647 if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
648 tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
649 if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
650 tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
654 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
655 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
659 if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
660 tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
664 if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
665 tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
666 if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
667 tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
670 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
671 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
672 if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
673 tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
676 if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries) intel_tlb_lookup()
677 tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries; intel_tlb_lookup()
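Every arm of the switch above performs the same max-accumulate step on a different slot; a hypothetical helper makes the repeated pattern explicit:

    /* Keep the largest entry count reported for a given TLB slot. */
    static inline void tlb_take_max(unsigned short *slot, unsigned int entries)
    {
            if (*slot < entries)
                    *slot = entries;
    }

    /* e.g.: tlb_take_max(&tlb_lli_4k[ENTRIES], intel_tlb_table[k].entries); */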
match.c
12 * Return the entry if the current CPU matches the entries in the
15 * respective wildcard entries.
cpu.h
34 unsigned int entries; member in struct:_tlb_table
/linux-4.4.14/arch/x86/include/uapi/asm/
e820.h
4 #define E820MAX 128 /* number of entries in E820MAP */
12 * nodes, based on up to three entries per node for which the
15 * entries that might need room in the same arrays, prior to the
17 * of three memory map entries per node is "enough" entries for
19 * use of additional EFI map entries. Future platforms may want
20 * to allow more than three entries per node or otherwise refine
28 #define E820NR 0x1e8 /* # entries in E820MAP */
auxvec.h
12 /* entries in ARCH_DLINFO: */
ldt.h
9 /* Maximum number of LDT entries supported. */
/linux-4.4.14/fs/quota/
quota_tree.h
13 * there will be space for exactly 21 quota-entries in a block
18 __le16 dqdh_entries; /* Number of valid entries in block */
/linux-4.4.14/include/uapi/linux/
net_dropmon.h
30 __u32 entries; member in struct:net_dm_config_msg
35 __u32 entries; member in struct:net_dm_alert_msg
nfsacl.h
27 /* Flag for Default ACL entries */
auxvec.h
6 /* Symbolic values for the entries in the auxiliary table
/linux-4.4.14/drivers/gpu/drm/radeon/
r600_dpm.c
825 radeon_table->entries = kzalloc(size, GFP_KERNEL); r600_parse_clk_voltage_dep_table()
826 if (!radeon_table->entries) r600_parse_clk_voltage_dep_table()
829 entry = &atom_table->entries[0]; r600_parse_clk_voltage_dep_table()
831 radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | r600_parse_clk_voltage_dep_table()
833 radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage); r600_parse_clk_voltage_dep_table()
935 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); r600_parse_extended_power_table()
946 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); r600_parse_extended_power_table()
947 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); r600_parse_extended_power_table()
958 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries); r600_parse_extended_power_table()
959 kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries); r600_parse_extended_power_table()
960 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries); r600_parse_extended_power_table()
971 le16_to_cpu(clk_v->entries[0].usSclkLow) | r600_parse_extended_power_table()
972 (clk_v->entries[0].ucSclkHigh << 16); r600_parse_extended_power_table()
974 le16_to_cpu(clk_v->entries[0].usMclkLow) | r600_parse_extended_power_table()
975 (clk_v->entries[0].ucMclkHigh << 16); r600_parse_extended_power_table()
977 le16_to_cpu(clk_v->entries[0].usVddc); r600_parse_extended_power_table()
979 le16_to_cpu(clk_v->entries[0].usVddci); r600_parse_extended_power_table()
989 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = r600_parse_extended_power_table()
993 if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { r600_parse_extended_power_table()
998 entry = &psl->entries[0]; r600_parse_extended_power_table()
1000 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = r600_parse_extended_power_table()
1002 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = r600_parse_extended_power_table()
1004 rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = r600_parse_extended_power_table()
1036 rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); r600_parse_extended_power_table()
1037 if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) { r600_parse_extended_power_table()
1041 entry = &cac_table->entries[0]; r600_parse_extended_power_table()
1044 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = r600_parse_extended_power_table()
1046 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = r600_parse_extended_power_table()
1048 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = r600_parse_extended_power_table()
1051 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = r600_parse_extended_power_table()
1053 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = r600_parse_extended_power_table()
1090 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = r600_parse_extended_power_table()
1092 if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { r600_parse_extended_power_table()
1098 entry = &limits->entries[0]; r600_parse_extended_power_table()
1099 state_entry = &states->entries[0]; r600_parse_extended_power_table()
1102 ((u8 *)&array->entries[0] + r600_parse_extended_power_table()
1104 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = r600_parse_extended_power_table()
1106 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = r600_parse_extended_power_table()
1108 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = r600_parse_extended_power_table()
1117 ((u8 *)&array->entries[0] + r600_parse_extended_power_table()
1144 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = r600_parse_extended_power_table()
1146 if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { r600_parse_extended_power_table()
1152 entry = &limits->entries[0]; r600_parse_extended_power_table()
1155 ((u8 *)&array->entries[0] + r600_parse_extended_power_table()
1157 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = r600_parse_extended_power_table()
1159 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = r600_parse_extended_power_table()
1161 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = r600_parse_extended_power_table()
1176 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = r600_parse_extended_power_table()
1178 if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { r600_parse_extended_power_table()
1184 entry = &limits->entries[0]; r600_parse_extended_power_table()
1186 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = r600_parse_extended_power_table()
1188 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = r600_parse_extended_power_table()
1234 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = r600_parse_extended_power_table()
1236 if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { r600_parse_extended_power_table()
1242 entry = &limits->entries[0]; r600_parse_extended_power_table()
1244 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = r600_parse_extended_power_table()
1246 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = r600_parse_extended_power_table()
1299 kfree(dyn_state->vddc_dependency_on_sclk.entries); r600_free_extended_power_table()
1300 kfree(dyn_state->vddci_dependency_on_mclk.entries); r600_free_extended_power_table()
1301 kfree(dyn_state->vddc_dependency_on_mclk.entries); r600_free_extended_power_table()
1302 kfree(dyn_state->mvdd_dependency_on_mclk.entries); r600_free_extended_power_table()
1303 kfree(dyn_state->cac_leakage_table.entries); r600_free_extended_power_table()
1304 kfree(dyn_state->phase_shedding_limits_table.entries); r600_free_extended_power_table()
1307 kfree(dyn_state->vce_clock_voltage_dependency_table.entries); r600_free_extended_power_table()
1308 kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); r600_free_extended_power_table()
1309 kfree(dyn_state->samu_clock_voltage_dependency_table.entries); r600_free_extended_power_table()
1310 kfree(dyn_state->acp_clock_voltage_dependency_table.entries); r600_free_extended_power_table()
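All of these parse loops share one shape: kzalloc() a native entries[] array, then walk the packed ATOM table, byte-swapping little-endian 16-bit fields and splicing a low/high pair into one 32-bit clock. Condensed from the hits above (a fragment; types and locals as in r600_dpm.c):

    entry = &atom_table->entries[0];
    for (i = 0; i < atom_table->ucNumEntries; i++) {
            radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
                                           (entry->ucClockHigh << 16);
            radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
            /* records are packed back to back; advance in byte units */
            entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
                    ((u8 *)entry + sizeof(*entry));
    }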
kv_dpm.c
561 return vddc_sclk_table->entries[vid_2bit].v; kv_convert_vid2_to_vid7()
563 return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; kv_convert_vid2_to_vid7()
566 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) kv_convert_vid2_to_vid7()
567 return vid_mapping_table->entries[i].vid_7bit; kv_convert_vid2_to_vid7()
569 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; kv_convert_vid2_to_vid7()
583 if (vddc_sclk_table->entries[i].v == vid_7bit) kv_convert_vid7_to_vid2()
589 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) kv_convert_vid7_to_vid2()
590 return vid_mapping_table->entries[i].vid_2bit; kv_convert_vid7_to_vid2()
593 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; kv_convert_vid7_to_vid2()
723 if (table->entries[i].clk == pi->boot_pl.sclk) kv_program_bootup_state()
737 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) kv_program_bootup_state()
832 (pi->high_voltage_t < table->entries[i].v)) kv_populate_uvd_table()
835 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); kv_populate_uvd_table()
836 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); kv_populate_uvd_table()
837 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); kv_populate_uvd_table()
840 (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk); kv_populate_uvd_table()
842 (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk); kv_populate_uvd_table()
845 table->entries[i].vclk, false, &dividers); kv_populate_uvd_table()
851 table->entries[i].dclk, false, &dividers); kv_populate_uvd_table()
903 pi->high_voltage_t < table->entries[i].v) kv_populate_vce_table()
906 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); kv_populate_vce_table()
907 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); kv_populate_vce_table()
910 (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk); kv_populate_vce_table()
913 table->entries[i].evclk, false, &dividers); kv_populate_vce_table()
966 pi->high_voltage_t < table->entries[i].v) kv_populate_samu_table()
969 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); kv_populate_samu_table()
970 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); kv_populate_samu_table()
973 (u8)kv_get_clk_bypass(rdev, table->entries[i].clk); kv_populate_samu_table()
976 table->entries[i].clk, false, &dividers); kv_populate_samu_table()
1031 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); kv_populate_acp_table()
1032 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); kv_populate_acp_table()
1035 table->entries[i].clk, false, &dividers); kv_populate_acp_table()
1085 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) kv_calculate_dfs_bypass_settings()
1087 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) kv_calculate_dfs_bypass_settings()
1089 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) kv_calculate_dfs_bypass_settings()
1091 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) kv_calculate_dfs_bypass_settings()
1093 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) kv_calculate_dfs_bypass_settings()
1106 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) kv_calculate_dfs_bypass_settings()
1108 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) kv_calculate_dfs_bypass_settings()
1110 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) kv_calculate_dfs_bypass_settings()
1112 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) kv_calculate_dfs_bypass_settings()
1114 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) kv_calculate_dfs_bypass_settings()
1466 if (table->entries[i].evclk >= evclk) kv_get_vce_boot_level()
1554 if (table->entries[i].clk >= 0) /* XXX */ kv_get_acp_boot_level()
1716 if ((table->entries[i].clk >= new_ps->levels[0].sclk) || kv_set_valid_clock_range()
1724 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) kv_set_valid_clock_range()
1730 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > kv_set_valid_clock_range()
1731 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) kv_set_valid_clock_range()
1741 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || kv_set_valid_clock_range()
1749 if (table->entries[i].sclk_frequency <= kv_set_valid_clock_range()
1757 table->entries[pi->highest_valid].sclk_frequency) > kv_set_valid_clock_range()
1758 (table->entries[pi->lowest_valid].sclk_frequency - kv_set_valid_clock_range()
1974 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; kv_construct_max_power_limits_table()
1977 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); kv_construct_max_power_limits_table()
1997 uvd_table->entries[i].v = kv_patch_voltage_values()
1999 uvd_table->entries[i].v); kv_patch_voltage_values()
2004 vce_table->entries[i].v = kv_patch_voltage_values()
2006 vce_table->entries[i].v); kv_patch_voltage_values()
2011 samu_table->entries[i].v = kv_patch_voltage_values()
2013 samu_table->entries[i].v); kv_patch_voltage_values()
2018 acp_table->entries[i].v = kv_patch_voltage_values()
2020 acp_table->entries[i].v); kv_patch_voltage_values()
2113 (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <= kv_get_high_voltage_limit()
2125 (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <= kv_get_high_voltage_limit()
2168 if (stable_p_state_sclk >= table->entries[i].clk) { kv_apply_state_adjust_rules()
2169 stable_p_state_sclk = table->entries[i].clk; kv_apply_state_adjust_rules()
2175 stable_p_state_sclk = table->entries[0].clk; kv_apply_state_adjust_rules()
2198 ps->levels[i].sclk = table->entries[limit].clk; kv_apply_state_adjust_rules()
2210 ps->levels[i].sclk = table->entries[limit].sclk_frequency; kv_apply_state_adjust_rules()
2361 kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v))) kv_init_graphics_levels()
2364 kv_set_divider_value(rdev, i, table->entries[i].clk); kv_init_graphics_levels()
2367 table->entries[i].v); kv_init_graphics_levels()
2381 kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit)) kv_init_graphics_levels()
2384 kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency); kv_init_graphics_levels()
2385 kv_set_vid(rdev, i, table->entries[i].vid_2bit); kv_init_graphics_levels()
pptable.h
475 UCHAR ucNumEntries; // Number of entries.
476 ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. member in struct:_ATOM_PPLIB_Clock_Voltage_Dependency_Table
491 UCHAR ucNumEntries; // Number of entries.
492 ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. member in struct:_ATOM_PPLIB_Clock_Voltage_Limit_Table
515 UCHAR ucNumEntries; // Number of entries.
516 ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries. member in struct:_ATOM_PPLIB_CAC_Leakage_Table
530 UCHAR ucNumEntries; // Number of entries.
531 ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries. member in struct:_ATOM_PPLIB_PhaseSheddingLimits_Table
543 VCEClockInfo entries[1]; member in struct:_VCEClockInfoArray
555 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1]; member in struct:_ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
567 ATOM_PPLIB_VCE_State_Record entries[1]; member in struct:_ATOM_PPLIB_VCE_State_Table
589 UVDClockInfo entries[1]; member in struct:_UVDClockInfoArray
601 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1]; member in struct:_ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
620 ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1]; member in struct:_ATOM_PPLIB_SAMClk_Voltage_Limit_Table
638 ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1]; member in struct:_ATOM_PPLIB_ACPClk_Voltage_Limit_Table
/linux-4.4.14/drivers/isdn/hardware/eicon/
entity.h
21 diva_um_idi_data_queue_t rc; /* two entries */
/linux-4.4.14/drivers/iio/imu/
Makefile
5 # When adding new entries keep the list in alphabetical order
/linux-4.4.14/drivers/iio/magnetometer/
Makefile
5 # When adding new entries keep the list in alphabetical order
/linux-4.4.14/drivers/iio/pressure/
Makefile
5 # When adding new entries keep the list in alphabetical order
/linux-4.4.14/arch/cris/include/arch-v10/arch/
tlb.h
6 * so we can make TLB entries that will never match.
/linux-4.4.14/fs/nfs_common/
nfsacl.c
12 * four instead of three entries.
15 * the ACL_MASK and ACL_GROUP_OBJ entries may differ.)
17 * entries contain the identifiers of the owner and owning group.
19 * - ACL entries in the kernel are kept sorted in ascending order
94 int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0; nfsacl_encode() local
98 .array_len = encode_entries ? entries : 0, nfsacl_encode()
109 if (entries > NFS_ACL_MAX_ENTRIES || nfsacl_encode()
110 xdr_encode_word(buf, base, entries)) nfsacl_encode()
121 /* Insert entries in canonical order: other orders seem nfsacl_encode()
229 /* Find the ACL_GROUP_OBJ and ACL_MASK entries. */ FOREACH_ACL_ENTRY()
273 u32 entries; nfsacl_decode() local
276 if (xdr_decode_word(buf, base, &entries) || nfsacl_decode()
277 entries > NFS_ACL_MAX_ENTRIES) nfsacl_decode()
279 nfsacl_desc.desc.array_maxlen = entries; nfsacl_decode()
284 if (entries != nfsacl_desc.desc.array_len || nfsacl_decode()
292 *aclcnt = entries; nfsacl_decode()
/linux-4.4.14/kernel/
backtracetest.c
52 unsigned long entries[8]; backtrace_test_saved() local
58 trace.max_entries = ARRAY_SIZE(entries); backtrace_test_saved()
59 trace.entries = entries; backtrace_test_saved()
stacktrace.c
18 if (WARN_ON(!trace->entries)) print_stack_trace()
23 print_ip_sym(trace->entries[i]); print_stack_trace()
36 if (WARN_ON(!trace->entries)) snprint_stack_trace()
40 ip = trace->entries[i]; snprint_stack_trace()
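The contract these hits show: the caller owns the entries[] buffer and passes it in via struct stack_trace for the core to fill. Minimal use, as in backtracetest.c above:

    unsigned long entries[8];
    struct stack_trace trace = {
            .nr_entries  = 0,
            .max_entries = ARRAY_SIZE(entries),
            .entries     = entries,
            .skip        = 0,       /* how many top frames to drop */
    };

    save_stack_trace(&trace);
    print_stack_trace(&trace, 0);   /* second argument: indent spaces */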
jump_label.c
158 return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK); static_key_entries()
163 return (unsigned long)key->entries & JUMP_TYPE_MASK; static_key_type()
224 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH. jump_label_init()
226 *((unsigned long *)&key->entries) += (unsigned long)iter; jump_label_init()
249 struct jump_entry *entries; member in struct:static_key_mod
275 __jump_label_update(key, mod->entries, __jump_label_mod_update()
294 /* if the module doesn't have jump label entries, just return */ jump_label_apply_nops()
313 /* if the module doesn't have jump label entries, just return */ jump_label_add_module()
329 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH. jump_label_add_module()
331 *((unsigned long *)&key->entries) += (unsigned long)iter; jump_label_add_module()
339 jlm->entries = iter; jump_label_add_module()
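static_key_entries()/static_key_type() above implement pointer tagging: the jump_entry array is word-aligned, so the low bit(s) of key->entries are free to hold the branch type and are masked off before dereference. The "preserve JUMP_LABEL_TRUE_BRANCH" comments show the other half of the trick, sketched here as a fragment (assuming a single tag bit):

    /* Adding the array base with '+=' leaves the tag intact because the
     * iter pointer is word-aligned and its low bit is zero:
     *     *((unsigned long *)&key->entries) += (unsigned long)iter;
     * Reading back splits pointer and tag: */
    entries = (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
    type    = (unsigned long)key->entries & JUMP_TYPE_MASK;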
/linux-4.4.14/include/uapi/linux/netfilter/
xt_hashlimit.h
34 __u32 max; /* max number of entries */
36 __u32 expire; /* when do entries expire? */
58 __u32 max; /* max number of entries */
60 __u32 expire; /* when do entries expire? */
x_tables.h
147 #define XT_ENTRY_ITERATE_CONTINUE(type, entries, size, n, fn, args...) \
155 __entry = (void *)(entries) + __i; \
167 #define XT_ENTRY_ITERATE(type, entries, size, fn, args...) \
168 XT_ENTRY_ITERATE_CONTINUE(type, entries, size, 0, fn, args)
/linux-4.4.14/arch/s390/include/asm/
linkage.h
12 * Helper macro for exception table entries
tlbflush.h
10 * Flush all TLB entries on the local CPU.
18 * Flush TLB entries for a specific ASCE on all CPUs
29 * Flush TLB entries for a specific ASCE on the local CPU
43 * Flush all TLB entries on all CPUs.
61 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
84 * Flush TLB entries for a specific ASCE on all CPUs.
122 * Flush TLB entries for a specific ASCE on all CPUs.
/linux-4.4.14/arch/sh/kernel/
stacktrace.c
41 trace->entries[trace->nr_entries++] = addr; save_stack_address()
55 trace->entries[trace->nr_entries++] = ULONG_MAX; save_stack_trace()
76 trace->entries[trace->nr_entries++] = addr; save_stack_address_nosched()
90 trace->entries[trace->nr_entries++] = ULONG_MAX; save_stack_trace_tsk()
head_32.S
92 * When we boot in 32-bit MMU mode there are 2 PMB entries already
105 * PMB entries. This clearing also deals with the fact that PMB entries
107 * when the reboot occurred, so to be safe we clear all entries and start
133 * r10 = number of PMB entries we've setup
157 * don't bother setting up new entries here, and let the late PMB
160 * Note that we may need to coalesce and merge entries in order
241 /* Increment number of PMB entries */ \
272 * Clear the remaining PMB entries.
275 * r10 = number of entries we've setup so far
/linux-4.4.14/arch/sparc/crypto/
crop_devid.c
7 * load any modules which have device table entries that
/linux-4.4.14/arch/sparc/include/asm/
traps.h
2 * traps.h: Format of entries for the Sparc trap table.
/linux-4.4.14/arch/parisc/kernel/
stacktrace.c
34 /* unwind stack and save entries in stack_trace struct */ dump_trace()
41 trace->entries[trace->nr_entries++] = info.ip; dump_trace()
53 trace->entries[trace->nr_entries++] = ULONG_MAX; save_stack_trace()
61 trace->entries[trace->nr_entries++] = ULONG_MAX; save_stack_trace_tsk()
/linux-4.4.14/arch/cris/include/arch-v32/arch/
tlb.h
7 * last page_id is never used so we can make TLB entries that never match.
/linux-4.4.14/arch/cris/include/asm/
axisflashmap.h
18 __u16 size; /* Length of ptable block (entries + end marker) */
19 __u32 checksum; /* simple longword sum, over entries + end marker */
22 /* And followed by partition table entries */
47 struct partitiontable_entry entries[]; member in struct:partitiontable
/linux-4.4.14/arch/x86/kernel/
stacktrace.c
32 trace->entries[trace->nr_entries++] = addr; __save_stack_address()
65 trace->entries[trace->nr_entries++] = ULONG_MAX; save_stack_trace()
73 trace->entries[trace->nr_entries++] = ULONG_MAX; save_stack_trace_regs()
80 trace->entries[trace->nr_entries++] = ULONG_MAX; save_stack_trace_tsk()
114 trace->entries[trace->nr_entries++] = regs->ip; __save_stack_trace_user()
126 trace->entries[trace->nr_entries++] = __save_stack_trace_user()
144 trace->entries[trace->nr_entries++] = ULONG_MAX; save_stack_trace_user()
ldt.c
33 set_ldt(pc->ldt->entries, pc->ldt->size); flush_ldt()
59 new_ldt->entries = vzalloc(alloc_size); alloc_ldt_struct()
61 new_ldt->entries = (void *)get_zeroed_page(GFP_KERNEL); alloc_ldt_struct()
63 if (!new_ldt->entries) { alloc_ldt_struct()
75 paravirt_alloc_ldt(ldt->entries, ldt->size); finalize_ldt_struct()
94 paravirt_free_ldt(ldt->entries, ldt->size); free_ldt_struct()
96 vfree(ldt->entries); free_ldt_struct()
98 free_page((unsigned long)ldt->entries); free_ldt_struct()
131 memcpy(new_ldt->entries, old_mm->context.ldt->entries, init_new_context()
173 if (copy_to_user(ptr, mm->context.ldt->entries, size)) { read_ldt()
260 memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE); write_ldt()
261 new_ldt->entries[ldt_info.entry_number] = ldt; write_ldt()
tce_64.c
2 * This file manages the translation entries for the IBM Calgary IOMMU.
92 * smallest table is 8K entries, so shift result by 13 to table_size_to_number_of_entries()
106 /* set the tce table size - measured in entries */ tce_table_setparms()
111 * entries; we need one bit per entry tce_table_setparms()
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
amdgpu_dpm.c
265 amdgpu_table->entries = kzalloc(size, GFP_KERNEL); amdgpu_parse_clk_voltage_dep_table()
266 if (!amdgpu_table->entries) amdgpu_parse_clk_voltage_dep_table()
269 entry = &atom_table->entries[0]; amdgpu_parse_clk_voltage_dep_table()
271 amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | amdgpu_parse_clk_voltage_dep_table()
273 amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage); amdgpu_parse_clk_voltage_dep_table()
412 le16_to_cpu(clk_v->entries[0].usSclkLow) | amdgpu_parse_extended_power_table()
413 (clk_v->entries[0].ucSclkHigh << 16); amdgpu_parse_extended_power_table()
415 le16_to_cpu(clk_v->entries[0].usMclkLow) | amdgpu_parse_extended_power_table()
416 (clk_v->entries[0].ucMclkHigh << 16); amdgpu_parse_extended_power_table()
418 le16_to_cpu(clk_v->entries[0].usVddc); amdgpu_parse_extended_power_table()
420 le16_to_cpu(clk_v->entries[0].usVddci); amdgpu_parse_extended_power_table()
430 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = amdgpu_parse_extended_power_table()
434 if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { amdgpu_parse_extended_power_table()
439 entry = &psl->entries[0]; amdgpu_parse_extended_power_table()
441 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = amdgpu_parse_extended_power_table()
443 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = amdgpu_parse_extended_power_table()
445 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = amdgpu_parse_extended_power_table()
477 adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); amdgpu_parse_extended_power_table()
478 if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { amdgpu_parse_extended_power_table()
482 entry = &cac_table->entries[0]; amdgpu_parse_extended_power_table()
485 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = amdgpu_parse_extended_power_table()
487 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = amdgpu_parse_extended_power_table()
489 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = amdgpu_parse_extended_power_table()
492 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = amdgpu_parse_extended_power_table()
494 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = amdgpu_parse_extended_power_table()
531 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = amdgpu_parse_extended_power_table()
533 if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { amdgpu_parse_extended_power_table()
539 entry = &limits->entries[0]; amdgpu_parse_extended_power_table()
540 state_entry = &states->entries[0]; amdgpu_parse_extended_power_table()
543 ((u8 *)&array->entries[0] + amdgpu_parse_extended_power_table()
545 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = amdgpu_parse_extended_power_table()
547 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = amdgpu_parse_extended_power_table()
549 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = amdgpu_parse_extended_power_table()
558 ((u8 *)&array->entries[0] + amdgpu_parse_extended_power_table()
585 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = amdgpu_parse_extended_power_table()
587 if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { amdgpu_parse_extended_power_table()
593 entry = &limits->entries[0]; amdgpu_parse_extended_power_table()
596 ((u8 *)&array->entries[0] + amdgpu_parse_extended_power_table()
598 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = amdgpu_parse_extended_power_table()
600 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = amdgpu_parse_extended_power_table()
602 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = amdgpu_parse_extended_power_table()
617 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = amdgpu_parse_extended_power_table()
619 if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { amdgpu_parse_extended_power_table()
625 entry = &limits->entries[0]; amdgpu_parse_extended_power_table()
627 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = amdgpu_parse_extended_power_table()
629 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = amdgpu_parse_extended_power_table()
675 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = amdgpu_parse_extended_power_table()
677 if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { amdgpu_parse_extended_power_table()
683 entry = &limits->entries[0]; amdgpu_parse_extended_power_table()
685 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = amdgpu_parse_extended_power_table()
687 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = amdgpu_parse_extended_power_table()
740 kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries); amdgpu_parse_extended_power_table()
753 kfree(dyn_state->vddc_dependency_on_sclk.entries); amdgpu_free_extended_power_table()
754 kfree(dyn_state->vddci_dependency_on_mclk.entries); amdgpu_free_extended_power_table()
755 kfree(dyn_state->vddc_dependency_on_mclk.entries); amdgpu_free_extended_power_table()
756 kfree(dyn_state->mvdd_dependency_on_mclk.entries); amdgpu_free_extended_power_table()
757 kfree(dyn_state->cac_leakage_table.entries); amdgpu_free_extended_power_table()
758 kfree(dyn_state->phase_shedding_limits_table.entries); amdgpu_free_extended_power_table()
761 kfree(dyn_state->vce_clock_voltage_dependency_table.entries); amdgpu_free_extended_power_table()
762 kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); amdgpu_free_extended_power_table()
763 kfree(dyn_state->samu_clock_voltage_dependency_table.entries); amdgpu_free_extended_power_table()
764 kfree(dyn_state->acp_clock_voltage_dependency_table.entries); amdgpu_free_extended_power_table()
765 kfree(dyn_state->vddgfx_dependency_on_sclk.entries); amdgpu_free_extended_power_table()
cz_dpm.c
78 table->sclk = dep_table->entries[dep_table->count - 1].clk; cz_construct_max_power_limits_table()
80 dep_table->entries[dep_table->count - 1].v); cz_construct_max_power_limits_table()
185 uvd_table->entries[i].v = cz_patch_voltage_values()
187 uvd_table->entries[i].v); cz_patch_voltage_values()
192 vce_table->entries[i].v = cz_patch_voltage_values()
194 vce_table->entries[i].v); cz_patch_voltage_values()
199 acp_table->entries[i].v = cz_patch_voltage_values()
201 acp_table->entries[i].v); cz_patch_voltage_values()
246 pl->sclk = table->entries[clock_info->carrizo.index].clk; cz_parse_pplib_clock_info()
247 pl->vddc_index = table->entries[clock_info->carrizo.index].v; cz_parse_pplib_clock_info()
527 sclk = table->entries[sclk_index].clk; cz_dpm_debugfs_print_current_performance_level()
544 vclk = uvd_table->entries[uvd_index].vclk; cz_dpm_debugfs_print_current_performance_level()
545 dclk = uvd_table->entries[uvd_index].dclk; cz_dpm_debugfs_print_current_performance_level()
555 ecclk = vce_table->entries[vce_index].ecclk; cz_dpm_debugfs_print_current_performance_level()
728 (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0; cz_dpm_upload_pptable_to_smu()
730 (i < vddc_table->count) ? vddc_table->entries[i].clk : 0; cz_dpm_upload_pptable_to_smu()
741 (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0; cz_dpm_upload_pptable_to_smu()
745 (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0; cz_dpm_upload_pptable_to_smu()
747 (i < acp_table->count) ? acp_table->entries[i].clk : 0; cz_dpm_upload_pptable_to_smu()
758 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; cz_dpm_upload_pptable_to_smu()
760 (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0; cz_dpm_upload_pptable_to_smu()
770 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; cz_dpm_upload_pptable_to_smu()
772 (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0; cz_dpm_upload_pptable_to_smu()
783 (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0; cz_dpm_upload_pptable_to_smu()
785 (i < vce_table->count) ? vce_table->entries[i].ecclk : 0; cz_dpm_upload_pptable_to_smu()
822 clock = table->entries[level].clk; cz_init_sclk_limit()
825 clock = table->entries[table->count - 1].clk; cz_init_sclk_limit()
850 clock = table->entries[level].vclk; cz_init_uvd_limit()
853 clock = table->entries[table->count - 1].vclk; cz_init_uvd_limit()
873 pi->vce_dpm.soft_min_clk = table->entries[0].ecclk; cz_init_vce_limit()
874 pi->vce_dpm.hard_min_clk = table->entries[0].ecclk; cz_init_vce_limit()
878 clock = table->entries[level].ecclk; cz_init_vce_limit()
882 clock = table->entries[table->count - 1].ecclk; cz_init_vce_limit()
907 clock = table->entries[level].clk; cz_init_acp_limit()
910 clock = table->entries[table->count - 1].clk; cz_init_acp_limit()
1031 if (clock <= table->entries[i].clk) cz_get_sclk_level()
1039 if (clock >= table->entries[i].clk) cz_get_sclk_level()
1065 if (clock <= table->entries[i].ecclk) cz_get_eclk_level()
1071 if (clock >= table->entries[i].ecclk) cz_get_eclk_level()
1709 pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk; cz_dpm_unforce_dpm_levels()
1712 pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk; cz_dpm_unforce_dpm_levels()
1715 dep_table->entries[dep_table->count - 1].clk; cz_dpm_unforce_dpm_levels()
1905 pi->vce_dpm.hard_min_clk = table->entries[table->count-1].ecclk; cz_update_vce_dpm()
1908 pi->vce_dpm.hard_min_clk = table->entries[0].ecclk; cz_update_vce_dpm()
kv_dpm.c
83 return vddc_sclk_table->entries[vid_2bit].v; kv_convert_vid2_to_vid7()
85 return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; kv_convert_vid2_to_vid7()
88 if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) kv_convert_vid2_to_vid7()
89 return vid_mapping_table->entries[i].vid_7bit; kv_convert_vid2_to_vid7()
91 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; kv_convert_vid2_to_vid7()
105 if (vddc_sclk_table->entries[i].v == vid_7bit) kv_convert_vid7_to_vid2()
111 if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) kv_convert_vid7_to_vid2()
112 return vid_mapping_table->entries[i].vid_2bit; kv_convert_vid7_to_vid2()
115 return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; kv_convert_vid7_to_vid2()
153 sclk_voltage_mapping_table->entries[n].sclk_frequency = sumo_construct_sclk_voltage_mapping_table()
155 sclk_voltage_mapping_table->entries[n].vid_2bit = sumo_construct_sclk_voltage_mapping_table()
173 vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = sumo_construct_vid_mapping_table()
175 vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = sumo_construct_vid_mapping_table()
181 if (vid_mapping_table->entries[i].vid_7bit == 0) { sumo_construct_vid_mapping_table()
183 if (vid_mapping_table->entries[j].vid_7bit != 0) { sumo_construct_vid_mapping_table()
184 vid_mapping_table->entries[i] = sumo_construct_vid_mapping_table()
185 vid_mapping_table->entries[j]; sumo_construct_vid_mapping_table()
186 vid_mapping_table->entries[j].vid_7bit = 0; sumo_construct_vid_mapping_table()
812 if (table->entries[i].clk == pi->boot_pl.sclk) kv_program_bootup_state()
826 if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) kv_program_bootup_state()
921 (pi->high_voltage_t < table->entries[i].v)) kv_populate_uvd_table()
924 pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); kv_populate_uvd_table()
925 pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); kv_populate_uvd_table()
926 pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); kv_populate_uvd_table()
929 (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); kv_populate_uvd_table()
931 (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); kv_populate_uvd_table()
934 table->entries[i].vclk, false, &dividers); kv_populate_uvd_table()
940 table->entries[i].dclk, false, &dividers); kv_populate_uvd_table()
992 pi->high_voltage_t < table->entries[i].v) kv_populate_vce_table()
995 pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); kv_populate_vce_table()
996 pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); kv_populate_vce_table()
999 (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); kv_populate_vce_table()
1002 table->entries[i].evclk, false, &dividers); kv_populate_vce_table()
1055 pi->high_voltage_t < table->entries[i].v) kv_populate_samu_table()
1058 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); kv_populate_samu_table()
1059 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); kv_populate_samu_table()
1062 (u8)kv_get_clk_bypass(adev, table->entries[i].clk); kv_populate_samu_table()
1065 table->entries[i].clk, false, &dividers); kv_populate_samu_table()
1120 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); kv_populate_acp_table()
1121 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); kv_populate_acp_table()
1124 table->entries[i].clk, false, &dividers); kv_populate_acp_table()
1174 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) kv_calculate_dfs_bypass_settings()
1176 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) kv_calculate_dfs_bypass_settings()
1178 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) kv_calculate_dfs_bypass_settings()
1180 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) kv_calculate_dfs_bypass_settings()
1182 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) kv_calculate_dfs_bypass_settings()
1195 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) kv_calculate_dfs_bypass_settings()
1197 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) kv_calculate_dfs_bypass_settings()
1199 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) kv_calculate_dfs_bypass_settings()
1201 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) kv_calculate_dfs_bypass_settings()
1203 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) kv_calculate_dfs_bypass_settings()
1536 if (table->entries[i].evclk >= evclk) kv_get_vce_boot_level()
1630 if (table->entries[i].clk >= 0) /* XXX */ kv_get_acp_boot_level()
1812 if ((table->entries[i].clk >= new_ps->levels[0].sclk) || kv_set_valid_clock_range()
1820 if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) kv_set_valid_clock_range()
1826 if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > kv_set_valid_clock_range()
1827 (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) kv_set_valid_clock_range()
1837 if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || kv_set_valid_clock_range()
1845 if (table->entries[i].sclk_frequency <= kv_set_valid_clock_range()
1853 table->entries[pi->highest_valid].sclk_frequency) > kv_set_valid_clock_range()
1854 (table->entries[pi->lowest_valid].sclk_frequency - kv_set_valid_clock_range()
2068 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; kv_construct_max_power_limits_table()
2071 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); kv_construct_max_power_limits_table()
2091 uvd_table->entries[i].v = kv_patch_voltage_values()
2093 uvd_table->entries[i].v); kv_patch_voltage_values()
2098 vce_table->entries[i].v = kv_patch_voltage_values()
2100 vce_table->entries[i].v); kv_patch_voltage_values()
2105 samu_table->entries[i].v = kv_patch_voltage_values()
2107 samu_table->entries[i].v); kv_patch_voltage_values()
2112 acp_table->entries[i].v = kv_patch_voltage_values()
2114 acp_table->entries[i].v); kv_patch_voltage_values()
2207 (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= kv_get_high_voltage_limit()
2219 (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= kv_get_high_voltage_limit()
2262 if (stable_p_state_sclk >= table->entries[i].clk) { kv_apply_state_adjust_rules()
2263 stable_p_state_sclk = table->entries[i].clk; kv_apply_state_adjust_rules()
2269 stable_p_state_sclk = table->entries[0].clk; kv_apply_state_adjust_rules()
2292 ps->levels[i].sclk = table->entries[limit].clk; kv_apply_state_adjust_rules()
2304 ps->levels[i].sclk = table->entries[limit].sclk_frequency; kv_apply_state_adjust_rules()
2455 kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) kv_init_graphics_levels()
2458 kv_set_divider_value(adev, i, table->entries[i].clk); kv_init_graphics_levels()
2461 table->entries[i].v); kv_init_graphics_levels()
2475 kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) kv_init_graphics_levels()
2478 kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); kv_init_graphics_levels()
2479 kv_set_vid(adev, i, table->entries[i].vid_2bit); kv_init_graphics_levels()
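The kv_convert_vid2_to_vid7()/kv_convert_vid7_to_vid2() hits show a small mapping table scanned for a match, with a fallback to the last entry on a miss. A minimal sketch of that lookup-with-fallback, with types simplified from the kv_dpm ones:

    #include <stdio.h>
    #include <stdint.h>

    struct vid_mapping_entry { uint8_t vid_2bit, vid_7bit; };
    struct vid_mapping_table {
            uint32_t num_entries;
            struct vid_mapping_entry entries[4];
    };

    static uint8_t convert_vid2_to_vid7(const struct vid_mapping_table *t,
                                        uint8_t vid_2bit)
    {
            for (uint32_t i = 0; i < t->num_entries; i++)
                    if (t->entries[i].vid_2bit == vid_2bit)
                            return t->entries[i].vid_7bit;

            /* No match: fall back to the last entry, as the driver does. */
            return t->entries[t->num_entries - 1].vid_7bit;
    }

    int main(void)
    {
            struct vid_mapping_table t = {
                    .num_entries = 3,
                    .entries = { {0, 0x40}, {1, 0x48}, {2, 0x58} },
            };
            printf("vid7 for 1: 0x%x\n", convert_vid2_to_vid7(&t, 1));
            printf("vid7 for 3 (miss): 0x%x\n", convert_vid2_to_vid7(&t, 3));
            return 0;
    }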
/linux-4.4.14/drivers/net/wireless/p54/
H A Deeprom.c84 size_t entries; member in struct:p54_channel_list
157 if ((!list->entries) || (!list->band_channel_num[band])) p54_generate_band()
174 (i < list->entries); i++) { p54_generate_band()
245 for (i = list->entries; i >= 0; i--) { p54_update_channel_param()
252 if ((i < 0) && (list->entries < list->max_entries)) { p54_update_channel_param()
261 i = list->entries++; p54_update_channel_param()
330 if ((priv->iq_autocal_len != priv->curve_data->entries) || p54_generate_channel_lists()
331 (priv->iq_autocal_len != priv->output_limit->entries)) p54_generate_channel_lists()
336 max_channel_num = max_t(unsigned int, priv->output_limit->entries, p54_generate_channel_lists()
339 priv->curve_data->entries); p54_generate_channel_lists()
368 if (i < priv->output_limit->entries) { p54_generate_channel_lists()
384 if (i < priv->curve_data->entries) { p54_generate_channel_lists()
395 sort(list->channels, list->entries, sizeof(struct p54_channel_entry), p54_generate_channel_lists()
438 priv->curve_data->entries = curve_data->channels; p54_convert_rev0()
490 priv->curve_data->entries = curve_data->channels; p54_convert_rev1()
523 size_t db_len, entries; p54_parse_rssical() local
527 entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2; p54_parse_rssical()
528 if (len != sizeof(struct pda_rssi_cal_entry) * entries) { p54_parse_rssical()
540 entries = (len - offset) / p54_parse_rssical()
545 entries == 0) { p54_parse_rssical()
551 db_len = sizeof(*entry) * entries; p54_parse_rssical()
557 priv->rssi_db->entries = entries; p54_parse_rssical()
565 for (i = 0; i < entries; i++) { p54_parse_rssical()
573 for (i = 0; i < entries; i++) { p54_parse_rssical()
591 sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL); p54_parse_rssical()
614 for (i = 0; i < priv->rssi_db->entries; i++) { p54_rssi_find()
690 priv->output_limit->entries = data[1]; p54_convert_output_limits()
694 priv->output_limit->entries + p54_convert_output_limits()
707 size_t payload_len, entries, entry_size, offset; p54_convert_db() local
710 entries = le16_to_cpu(src->entries); p54_convert_db()
713 if (((entries * entry_size + offset) != payload_len) || p54_convert_db()
721 dst->entries = entries; p54_convert_db()
845 for (i = 0; i < priv->rssi_db->entries; i++) p54_parse_eeprom()
891 "not all required entries found in eeprom!\n"); p54_parse_eeprom()
/linux-4.4.14/arch/x86/xen/
H A Dmulticalls.c40 struct multicall_entry entries[MC_BATCH]; member in struct:mc_buffer
80 mc = &b->entries[0]; xen_mc_flush()
90 memcpy(b->debug, b->entries, xen_mc_flush()
94 if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0) xen_mc_flush()
97 if (b->entries[i].result < 0) xen_mc_flush()
110 b->entries[i].result, xen_mc_flush()
151 ret.mc = &b->entries[b->mcidx]; __xen_mc_entry()
172 b->entries[b->mcidx - 1].op != op)) { xen_mc_extend_args()
182 ret.mc = &b->entries[b->mcidx - 1]; xen_mc_extend_args()
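The multicalls.c hits describe a per-CPU batch buffer: __xen_mc_entry() hands out slots from entries[] under an mcidx counter, and xen_mc_flush() submits the whole array in one hypercall. A toy userspace model of that batch-and-flush flow — the hypercall is replaced by a stub, and MC_BATCH and the entry layout are assumed stand-ins:

    #include <stdio.h>

    #define MC_BATCH 32

    struct multicall_entry { unsigned long op; long result; };
    struct mc_buffer {
            struct multicall_entry entries[MC_BATCH];
            unsigned mcidx;
    };

    /* Stand-in for HYPERVISOR_multicall(): just mark everything done. */
    static int fake_multicall(struct multicall_entry *mc, unsigned n)
    {
            for (unsigned i = 0; i < n; i++)
                    mc[i].result = 0;
            return 0;
    }

    static void mc_flush(struct mc_buffer *b)
    {
            if (fake_multicall(b->entries, b->mcidx) != 0)
                    printf("multicall failed\n");
            for (unsigned i = 0; i < b->mcidx; i++)
                    if (b->entries[i].result < 0)
                            printf("entry %u failed: %ld\n",
                                   i, b->entries[i].result);
            b->mcidx = 0;   /* batch is empty again */
    }

    static struct multicall_entry *mc_entry(struct mc_buffer *b)
    {
            if (b->mcidx == MC_BATCH)
                    mc_flush(b);            /* no room: flush first */
            return &b->entries[b->mcidx++];
    }

    int main(void)
    {
            struct mc_buffer b = { .mcidx = 0 };
            for (int i = 0; i < 40; i++)
                    mc_entry(&b)->op = i;   /* forces one mid-way flush */
            mc_flush(&b);
            printf("flushed, mcidx=%u\n", b.mcidx);
            return 0;
    }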
/linux-4.4.14/arch/metag/include/uapi/asm/
H A Dptrace.h98 * @entries: Read pipeline entries
99 * @mask: Mask of valid pipeline entries (RPMask from TXDIVTIME register)
101 * This is the user-visible read pipeline state structure containing the entries
102 * currently in the read pipeline and the mask of valid entries.
107 unsigned long long entries[6]; member in struct:user_rp_state
/linux-4.4.14/drivers/net/ethernet/cisco/enic/
H A Dvnic_wq.h68 /* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
71 #define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
72 ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
74 #define VNIC_WQ_BUF_BLK_SZ(entries) \
75 (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf))
76 #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
77 DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries))
H A Dvnic_rq.h56 /* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
59 #define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
60 ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
62 #define VNIC_RQ_BUF_BLK_SZ(entries) \
63 (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
64 #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
65 DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
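Both vnic_wq.h and vnic_rq.h size their buffer blocks with the same macro arithmetic: pick a per-block entry count based on the total, then DIV_ROUND_UP the total by it. The expansion can be checked in plain C — the 32/64 defaults here are assumed stand-ins for the enic constants:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    #define BUF_DFLT_BLK_ENTRIES 32
    #define BUF_BLK_ENTRIES(entries) \
            ((unsigned int)((entries) < BUF_DFLT_BLK_ENTRIES ? 32 : 64))
    #define BUF_BLKS_NEEDED(entries) \
            DIV_ROUND_UP(entries, BUF_BLK_ENTRIES(entries))

    int main(void)
    {
            unsigned int sizes[] = { 16, 32, 64, 100, 256 };
            for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("%3u entries -> %u entries/blk, %u blks\n",
                           sizes[i], BUF_BLK_ENTRIES(sizes[i]),
                           BUF_BLKS_NEEDED(sizes[i]));
            return 0;
    }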
/linux-4.4.14/arch/mips/kernel/
H A Dsysrq.c17 * Dump TLB entries on all CPUs.
57 .action_msg = "Show TLB entries",
H A Dstacktrace.c27 trace->entries[trace->nr_entries++] = addr; save_raw_context_stack()
55 trace->entries[trace->nr_entries++] = pc; save_context_stack()
/linux-4.4.14/arch/mips/lib/
H A Dr3k_dump_tlb.c46 /* Unused entries have a virtual address of KSEG0. */ dump_tlb()
51 * Only print entries in use dump_tlb()
/linux-4.4.14/arch/cris/kernel/
H A Dstacktrace.c44 trace->entries[trace->nr_entries++] = addr; save_trace()
67 trace->entries[trace->nr_entries++] = ULONG_MAX; save_stack_trace_tsk()
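The mips and cris stacktrace hits share the common save_stack_trace() idiom: append each address to trace->entries until max_entries is hit, then terminate the trace with ULONG_MAX if there is room. A compact model of that contract:

    #include <stdio.h>
    #include <limits.h>

    struct stack_trace {
            unsigned int nr_entries, max_entries;
            unsigned long *entries;
    };

    static void save_addr(struct stack_trace *trace, unsigned long addr)
    {
            if (trace->nr_entries < trace->max_entries)
                    trace->entries[trace->nr_entries++] = addr;
    }

    int main(void)
    {
            unsigned long buf[6];
            struct stack_trace t = { .max_entries = 6, .entries = buf };

            for (unsigned long pc = 0x1000; pc < 0x1010; pc += 4)
                    save_addr(&t, pc);      /* extra frames are dropped */

            /* Terminator convention used by several arches above. */
            if (t.nr_entries < t.max_entries)
                    t.entries[t.nr_entries++] = ULONG_MAX;

            for (unsigned i = 0; i < t.nr_entries; i++)
                    printf("entries[%u] = %#lx\n", i, t.entries[i]);
            return 0;
    }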
/linux-4.4.14/drivers/lguest/
H A Dsegments.c26 * GDT entries are passed around as "struct desc_struct"s, which like IDT
27 * entries are split into two 32-bit members, "a" and "b". One day, someone
47 * There are several entries we don't let the Guest set. The TSS entry is the
49 * LGUEST_CS and LGUEST_DS entries are reserved for the Switcher, and the
61 * Once the Guest gave us new GDT entries, we fix them up a little. We
99 * a GDT for each CPU, and copy across the Guest's entries each time we want to
103 * constant GDT entries: the ones which are the same no matter what Guest we're
133 * This routine sets up the initial Guest GDT for booting. All entries start
150 * entries.
161 * When the Guest is run on a different CPU, or the GDT entries have changed,
162 * copy_gdt() is called to copy the Guest's GDT entries across to this CPU's
170 * The default entries from setup_default_gdt_entries() are not copy_gdt()
185 * We assume the Guest has the same number of GDT entries as the load_guest_gdt_entry()
189 kill_guest(cpu, "too many gdt entries %i", num); load_guest_gdt_entry()
205 * This is the fast-track version for just changing the three TLS entries.
216 /* Note that just the TLS entries have changed. */ guest_load_tls()
/linux-4.4.14/drivers/iio/accel/
H A DMakefile5 # When adding new entries keep the list in alphabetical order
/linux-4.4.14/drivers/iio/gyro/
H A DMakefile5 # When adding new entries keep the list in alphabetical order
/linux-4.4.14/drivers/crypto/qat/qat_dh895xcc/
H A Dadf_isr.c70 /* If SR-IOV is disabled, add entries for each bank */ adf_enable_msix()
76 pci_dev_info->msix_entries.entries[i].entry = i; adf_enable_msix()
78 pci_dev_info->msix_entries.entries[0].entry = adf_enable_msix()
83 pci_dev_info->msix_entries.entries, adf_enable_msix()
167 struct msix_entry *msixe = pci_dev_info->msix_entries.entries; adf_request_irqs()
215 struct msix_entry *msixe = pci_dev_info->msix_entries.entries; adf_free_irqs()
233 struct msix_entry *entries; adf_isr_alloc_msix_entry_table() local
237 /* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */ adf_isr_alloc_msix_entry_table()
241 entries = kzalloc_node(msix_num_entries * sizeof(*entries), adf_isr_alloc_msix_entry_table()
243 if (!entries) adf_isr_alloc_msix_entry_table()
248 kfree(entries); adf_isr_alloc_msix_entry_table()
257 accel_dev->accel_pci_dev.msix_entries.entries = entries; adf_isr_alloc_msix_entry_table()
263 kfree(entries); adf_isr_alloc_msix_entry_table()
273 kfree(accel_dev->accel_pci_dev.msix_entries.entries); adf_isr_free_msix_entry_table()
/linux-4.4.14/arch/x86/include/asm/
H A Dmpx.h20 * The directory is 2G (2^31) in size, and with 8-byte entries
21 * it has 2^28 entries.
29 * entries it has 2^20 entries.
H A Dpgtable-3level_types.h41 * entries per page directory level
H A Dsegment.h17 /* Simple and small GDT entries for booting only: */
112 * Number of entries in the GDT table:
117 * Segment selector values corresponding to the above entries:
179 /* Needs two entries */
181 /* Needs two entries */
191 * Number of entries in the GDT table:
196 * Segment selector values corresponding to the above entries:
/linux-4.4.14/arch/alpha/include/uapi/asm/
H A Dauxvec.h24 #define AT_VECTOR_SIZE_ARCH 4 /* entries in ARCH_DLINFO */
/linux-4.4.14/arch/ia64/kernel/
H A Dstacktrace.c24 trace->entries[trace->nr_entries++] = ip; ia64_do_save_stack()
/linux-4.4.14/drivers/md/
H A Ddm-bio-prison.c300 struct dm_deferred_entry entries[DEFERRED_SET_SIZE]; member in struct:dm_deferred_set
316 ds->entries[i].ds = ds; dm_deferred_set_create()
317 ds->entries[i].count = 0; dm_deferred_set_create()
318 INIT_LIST_HEAD(&ds->entries[i].work_items); dm_deferred_set_create()
337 entry = ds->entries + ds->current_entry; dm_deferred_entry_inc()
353 !ds->entries[ds->sweeper].count) { __sweep()
354 list_splice_init(&ds->entries[ds->sweeper].work_items, head); __sweep()
358 if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count) __sweep()
359 list_splice_init(&ds->entries[ds->sweeper].work_items, head); __sweep()
385 !ds->entries[ds->current_entry].count) dm_deferred_set_add_work()
388 list_add(work, &ds->entries[ds->current_entry].work_items); dm_deferred_set_add_work()
390 if (!ds->entries[next_entry].count) dm_deferred_set_add_work()
H A Ddm-cache-policy-mq.c124 * entries to the back of any of the levels. Think of it as a partially
271 * Sometimes we want to iterate through entries that have been pushed since
272 * a certain event. We use sentinel entries on the queues to delimit these
319 * Rather than storing the cblock in an entry, we allocate all entries in
322 * Free entries are linked together into a list.
325 struct entry *entries, *entries_end; member in struct:entry_pool
334 ep->entries = vzalloc(sizeof(struct entry) * nr_entries); epool_init()
335 if (!ep->entries) epool_init()
338 ep->entries_end = ep->entries + nr_entries; epool_init()
342 list_add(&ep->entries[i].list, &ep->free); epool_init()
351 vfree(ep->entries); epool_exit()
374 struct entry *e = ep->entries + from_cblock(cblock); alloc_particular_entry()
396 struct entry *e = ep->entries + from_cblock(cblock); epool_find()
407 return e >= ep->entries && e < ep->entries_end; in_pool()
412 return to_cblock(e - ep->entries); infer_cblock()
426 * Entries come from two pools, one of pre-cache entries, and one
433 * We maintain three queues of entries. The cache proper,
464 * book keeping effects. eg, decrementing hit counts on entries.
477 * block. Both pre_cache and cache entries are in here.
539 * when to add entries to the pre_cache and cache, and move between
625 * of the entries.
628 * of the entries in the cache (the first 20 entries across all levels in
629 * ascending order, giving preference to the clean entries at each level).
1210 * Cache entries may not be populated. So we cannot rely on the clean_target_met()
1387 DMERR("couldn't initialize pool of pre-cache entries"); mq_create()
1392 DMERR("couldn't initialize pool of cache entries"); mq_create()
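The dm-cache-policy-mq entry pool above allocates every entry in one vzalloc()ed array precisely so a cblock never has to be stored: it is inferred by pointer arithmetic (e - ep->entries), and membership is a simple range check. A userspace sketch of that pool, with calloc() standing in for vzalloc() and the free list omitted:

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { int in_use; /* payload would live here */ };

    struct entry_pool {
            struct entry *entries, *entries_end;
    };

    static int epool_init(struct entry_pool *ep, unsigned nr)
    {
            ep->entries = calloc(nr, sizeof(*ep->entries));
            if (!ep->entries)
                    return -1;
            ep->entries_end = ep->entries + nr;
            return 0;
    }

    /* The pool never stores the block number: it falls out of the address. */
    static unsigned infer_cblock(struct entry_pool *ep, struct entry *e)
    {
            return (unsigned)(e - ep->entries);
    }

    static int in_pool(struct entry_pool *ep, struct entry *e)
    {
            return e >= ep->entries && e < ep->entries_end;
    }

    int main(void)
    {
            struct entry_pool ep;
            if (epool_init(&ep, 16))
                    return 1;

            struct entry *e = &ep.entries[5];   /* like alloc_particular_entry(5) */
            e->in_use = 1;
            printf("in_pool=%d cblock=%u\n", in_pool(&ep, e), infer_cblock(&ep, e));
            free(ep.entries);
            return 0;
    }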
/linux-4.4.14/drivers/pinctrl/
H A Dpinctrl-adi2.h58 * @nfunction: The number of entries in @functions.
60 * @ngroups: The number of entries in @groups.
62 * @npins: The number of entries in @pins.
/linux-4.4.14/arch/sparc/mm/
H A Dextable.c21 /* Single insn entries are encoded as: search_extable()
25 * Range entries are encoded as: search_extable()
31 * Deleted entries are encoded as: search_extable()
/linux-4.4.14/include/uapi/linux/netfilter_arp/
H A Darp_tables.h30 #define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \
31 XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args)
147 /* Number of entries */
150 /* Size of entries. */
163 /* Number of entries */
166 /* Total size of new entries */
175 /* Information about old entries: */
176 /* Number of counters (must be equal to current number of entries). */
178 /* The old entries' counters. */
181 /* The entries (hang off end: not really an array). */
182 struct arpt_entry entries[0]; member in struct:arpt_replace
193 /* The entries. */
/linux-4.4.14/include/uapi/linux/netfilter_ipv4/
H A Dip_tables.h64 #define IPT_ENTRY_ITERATE(entries, size, fn, args...) \
65 XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args)
169 /* Number of entries */
172 /* Size of entries. */
185 /* Number of entries */
188 /* Total size of new entries */
197 /* Information about old entries: */
198 /* Number of counters (must be equal to current number of entries). */
200 /* The old entries' counters. */
203 /* The entries (hang off end: not really an array). */
204 struct ipt_entry entries[0]; member in struct:ipt_replace
215 /* The entries. */
/linux-4.4.14/include/uapi/linux/netfilter_ipv6/
H A Dip6_tables.h57 #define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \
58 XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args)
209 /* Number of entries */
212 /* Size of entries. */
225 /* Number of entries */
228 /* Total size of new entries */
237 /* Information about old entries: */
238 /* Number of counters (must be equal to current number of entries). */
240 /* The old entries' counters. */
243 /* The entries (hang off end: not really an array). */
244 struct ip6t_entry entries[0]; member in struct:ip6t_replace
255 /* The entries. */
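arp_tables, ip_tables and ip6_tables all end their replace structures the same way: a zero-length entries[0] member, so the variable-size rule blob "hangs off the end" of the header in a single allocation. A minimal demonstration of the layout trick, using the C99 flexible-array spelling and a made-up entry type:

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_entry { unsigned size; char payload[12]; };

    struct fake_replace {
            unsigned num_entries;
            unsigned entries_size;
            struct fake_entry entries[];    /* entries hang off the end */
    };

    int main(void)
    {
            unsigned n = 3;
            struct fake_replace *r =
                    malloc(sizeof(*r) + n * sizeof(struct fake_entry));
            if (!r)
                    return 1;

            r->num_entries = n;
            r->entries_size = n * sizeof(struct fake_entry);
            for (unsigned i = 0; i < n; i++) {
                    r->entries[i].size = sizeof(struct fake_entry);
                    snprintf(r->entries[i].payload,
                             sizeof(r->entries[i].payload), "rule%u", i);
            }

            for (unsigned i = 0; i < n; i++)
                    printf("%s\n", r->entries[i].payload);
            free(r);
            return 0;
    }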
/linux-4.4.14/arch/s390/mm/
H A Dextable.c35 * search that we use to find entries in it works properly.
54 /* Normalize entries to being relative to the start of the section */ sort_extable()
60 /* Denormalize all entries */ sort_extable()
/linux-4.4.14/arch/metag/include/asm/
H A Dtlbflush.h20 * FIXME: Meta 2 can flush single TLB entries.
27 /* flush TLB entries for just the current hardware thread */ __flush_tlb()
35 /* flush TLB entries for all hardware threads */ __flush_tlb()
/linux-4.4.14/drivers/scsi/lpfc/
H A Dlpfc_debugfs.h331 * lpfc_debug_dump_q - dump all entries from a specific queue
334 * This function dumps all entries from a queue specified by the queue
361 * lpfc_debug_dump_fcp_wq - dump all entries from a fcp work queue
365 * This function dumps all entries from a FCP work queue specified by the
381 * lpfc_debug_dump_fcp_cq - dump all entries from a fcp work queue's cmpl queue
385 * This function dumps all entries from a FCP complete queue which is
416 * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue
420 * This function dumps all entries from a FCP event queue which is
457 * lpfc_debug_dump_els_wq - dump all entries from the els work queue
460 * This function dumps all entries from the ELS work queue.
471 * lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue
474 * This function dumps all entries from the MBOX work queue.
485 * lpfc_debug_dump_dat_rq - dump all entries from the receive data queue
488 * This function dumps all entries from the receive data queue.
499 * lpfc_debug_dump_hdr_rq - dump all entries from the receive header queue
502 * This function dumps all entries from the receive header queue.
513 * lpfc_debug_dump_els_cq - dump all entries from the els complete queue
516 * This function dumps all entries from the els complete queue.
528 * lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue
531 * This function dumps all entries from the mbox complete queue.
543 * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
547 * This function dumps all entries from a work queue identified by the queue
571 * lpfc_debug_dump_mq_by_id - dump all entries from a mbox queue by queue id
575 * This function dumps all entries from a mbox work queue identified by the
588 * lpfc_debug_dump_rq_by_id - dump all entries from a receive queue by queue id
592 * This function dumps all entries from a receive queue identified by the
610 * lpfc_debug_dump_cq_by_id - dump all entries from a cmpl queue by queue id
614 * This function dumps all entries from a complete queue identified by the
646 * lpfc_debug_dump_eq_by_id - dump all entries from an event queue by queue id
650 * This function dumps all entries from an event queue identified by the
/linux-4.4.14/arch/powerpc/kvm/
H A Dbook3s_64_slb.S48 /* Declare SLB shadow as 0 entries big */
114 /* Remove all SLB entries that are in use. */
120 /* Restore bolted entries from the shadow */
126 /* Declare SLB shadow as SLB_NUM_BOLTED entries big */
133 /* Manually load all entries from shadow SLB */
H A De500_mmu.c76 esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1; get_tlb_esel()
86 int size = vcpu_e500->gtlb_params[tlbsel].entries; kvmppc_e500_tlb_index()
156 int size = vcpu_e500->gtlb_params[1].entries; kvmppc_recalc_tlb1map_range()
236 for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++) kvmppc_e500_emul_mt_mmucsr0()
239 for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++) kvmppc_e500_emul_mt_mmucsr0()
260 /* invalidate all entries */ kvmppc_e500_emul_tlbivax()
261 for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; kvmppc_e500_emul_tlbivax()
284 /* invalidate all entries */ tlbilx_all()
285 for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) { tlbilx_all()
828 vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0]; kvm_vcpu_ioctl_config_tlb()
829 vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1]; kvm_vcpu_ioctl_config_tlb()
881 vcpu->arch.tlbcfg[0] |= params[0].entries; vcpu_mmu_init()
886 vcpu->arch.tlbcfg[1] |= params[1].entries; vcpu_mmu_init()
908 int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; kvmppc_e500_tlb_init() local
913 vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE; kvmppc_e500_tlb_init()
914 vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE; kvmppc_e500_tlb_init()
923 vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL); kvmppc_e500_tlb_init()
931 vcpu_e500->gtlb_params[0].entries, kvmppc_e500_tlb_init()
937 vcpu_e500->gtlb_params[1].entries, kvmppc_e500_tlb_init()
943 vcpu_e500->gtlb_params[1].entries, kvmppc_e500_tlb_init()
H A De500_mmu_host.c40 #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
47 return host_tlb_params[1].entries - tlbcam_index - 1; tlb1_max_shadow_size()
193 /* Don't bother with unmapped entries */ inval_gtlbe_on_host()
277 sizeof(u64) * vcpu_e500->gtlb_params[1].entries); clear_tlb1_bitmap()
280 sizeof(unsigned int) * host_tlb_params[1].entries); clear_tlb1_bitmap()
289 for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) { clear_tlb_privs()
731 * Flush all shadow tlb entries everywhere. This is slow, but kvm_unmap_hva()
769 host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY; e500_mmu_host_init()
770 host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; e500_mmu_host_init()
777 if (host_tlb_params[0].entries == 0 || e500_mmu_host_init()
778 host_tlb_params[1].entries == 0) { e500_mmu_host_init()
785 host_tlb_params[1].ways = host_tlb_params[1].entries; e500_mmu_host_init()
787 if (!is_power_of_2(host_tlb_params[0].entries) || e500_mmu_host_init()
789 host_tlb_params[0].entries < host_tlb_params[0].ways || e500_mmu_host_init()
791 pr_err("%s: bad tlb0 host config: %u entries %u ways\n", e500_mmu_host_init()
792 __func__, host_tlb_params[0].entries, e500_mmu_host_init()
798 host_tlb_params[0].entries / host_tlb_params[0].ways; e500_mmu_host_init()
802 host_tlb_params[1].entries, e500_mmu_host_init()
H A Dbook3s_mmu_hpte.c167 /* Find the list of entries in the map */ kvmppc_mmu_pte_flush_page()
172 /* Check the list for matching entries and invalidate */ kvmppc_mmu_pte_flush_page()
186 /* Find the list of entries in the map */ kvmppc_mmu_pte_flush_long()
192 /* Check the list for matching entries and invalidate */ kvmppc_mmu_pte_flush_long()
234 /* Check the list for matching entries and invalidate */ kvmppc_mmu_pte_vflush_short()
256 /* Check the list for matching entries and invalidate */ kvmppc_mmu_pte_vflush_64k()
278 /* Check the list for matching entries and invalidate */ kvmppc_mmu_pte_vflush_long()
/linux-4.4.14/net/netfilter/ipvs/
H A Dip_vs_lblc.c16 * collect stale entries of 24+ hours when
60 * It is for garbage collection of stale IPVS lblc entries,
72 * entries that haven't been touched for a day.
108 struct timer_list periodic_timer; /* collect stale entries */
109 atomic_t entries; /* number of entries */ member in struct:ip_vs_lblc_table
110 int max_size; /* maximum size of entries */
176 atomic_inc(&tbl->entries); ip_vs_lblc_hash()
230 * Flush all the entries of the specified table.
244 atomic_dec(&tbl->entries); ip_vs_lblc_flush()
278 atomic_dec(&tbl->entries); ip_vs_lblc_full_check()
288 * It is used to collect stale entries when the number of entries
292 * entries that have not been used for a long time even
293 * if the number of entries doesn't exceed the maximum size
314 if (atomic_read(&tbl->entries) <= tbl->max_size) { ip_vs_lblc_check_expire()
319 goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3; ip_vs_lblc_check_expire()
332 atomic_dec(&tbl->entries); ip_vs_lblc_check_expire()
391 /* got to clean up table entries here */ ip_vs_lblc_done_svc()
504 * free up entries from the trash at any time. ip_vs_lblc_schedule()
H A Dip_vs_lblcr.c60 * It is for garbage collection of stale IPVS lblcr entries,
72 * entries that haven't been touched for a day.
278 atomic_t entries; /* number of entries */ member in struct:ip_vs_lblcr_table
279 int max_size; /* maximum size of entries */
280 struct timer_list periodic_timer; /* collect stale entries */
339 atomic_inc(&tbl->entries); ip_vs_lblcr_hash()
396 * Flush all the entries of the specified table.
442 atomic_dec(&tbl->entries); ip_vs_lblcr_full_check()
452 * It is used to collect stale entries when the number of entries
456 * entries that have not been used for a long time even
457 * if the number of entries doesn't exceed the maximum size
478 if (atomic_read(&tbl->entries) <= tbl->max_size) { ip_vs_lblcr_check_expire()
483 goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3; ip_vs_lblcr_check_expire()
496 atomic_dec(&tbl->entries); ip_vs_lblcr_check_expire()
554 /* got to clean up table entries here */ ip_vs_lblcr_done_svc()
/linux-4.4.14/tools/perf/ui/browsers/
H A Dscripts.c27 struct list_head entries; member in struct:perf_script_browser
127 INIT_LIST_HEAD(&script.entries); script_browse()
163 list_add_tail(&sline->node, &script.entries); script_browse()
181 script.b.entries = &script.entries; script_browse()
/linux-4.4.14/drivers/scsi/snic/
H A Dvnic_wq.h60 /* Break the vnic_wq_buf allocations into blocks of 64 entries */
63 #define VNIC_WQ_BUF_BLK_ENTRIES(entries) \
64 ((unsigned int)(entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \
68 #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
69 DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
70 #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
71 DIV_ROUND_UP(entries, VNIC_WQ_BUF_DFLT_BLK_ENTRIES)
/linux-4.4.14/arch/m68k/include/asm/
H A Dtlbflush.h27 * flush all user-space atc entries.
63 * flush all atc entries (both kernel and user-space entries).
143 /* Clear user TLB entries within the context named in mm */ flush_tlb_mm()
233 * flush all user-space atc entries.
248 * flush all atc entries (both kernel and user-space entries).
/linux-4.4.14/drivers/net/wireless/mediatek/mt7601u/
H A Ddma.c183 q->start = (q->start + 1) % q->entries; mt7601u_rx_get_pending_entry()
203 q->end = (q->end + 1) % q->entries; mt7601u_complete_rx()
244 if (q->used == q->entries - q->entries / 8) mt7601u_complete_tx()
247 q->start = (q->start + 1) % q->entries; mt7601u_complete_tx()
291 if (WARN_ON(q->entries <= q->used)) { mt7601u_dma_submit_tx()
313 q->end = (q->end + 1) % q->entries; mt7601u_dma_submit_tx()
316 if (q->used >= q->entries) mt7601u_dma_submit_tx()
370 for (i = 0; i < dev->rx_q.entries; i++) { mt7601u_kill_rx()
406 for (i = 0; i < dev->rx_q.entries; i++) { mt7601u_submit_rx()
419 for (i = 0; i < dev->rx_q.entries; i++) { mt7601u_free_rx()
431 dev->rx_q.entries = N_RX_ENTRIES; mt7601u_alloc_rx()
450 for (i = 0; i < q->entries; i++) { mt7601u_free_tx_queue()
470 q->entries = N_TX_ENTRIES; mt7601u_alloc_tx_queue()
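The mt7601u queue hits are a textbook circular buffer: start and end advance modulo q->entries, and used tracks occupancy. A standalone ring with the same index arithmetic (sizes and payload are illustrative):

    #include <stdio.h>

    #define N_ENTRIES 8

    struct ring {
            unsigned start, end, used, entries;
            int buf[N_ENTRIES];
    };

    static int ring_push(struct ring *q, int v)
    {
            if (q->used >= q->entries)
                    return -1;                      /* full */
            q->buf[q->end] = v;
            q->end = (q->end + 1) % q->entries;     /* same wrap as dma.c above */
            q->used++;
            return 0;
    }

    static int ring_pop(struct ring *q, int *v)
    {
            if (!q->used)
                    return -1;                      /* empty */
            *v = q->buf[q->start];
            q->start = (q->start + 1) % q->entries;
            q->used--;
            return 0;
    }

    int main(void)
    {
            struct ring q = { .entries = N_ENTRIES };
            for (int i = 0; i < 10; i++)
                    if (ring_push(&q, i) < 0)
                            printf("push %d: ring full\n", i);
            int v;
            while (!ring_pop(&q, &v))
                    printf("popped %d\n", v);
            return 0;
    }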
/linux-4.4.14/drivers/media/dvb-frontends/
H A Ddvb-pll.c74 } entries[]; member in struct:dvb_pll_desc
87 .entries = {
110 .entries = {
133 .entries = {
151 .entries = {
166 .entries = {
188 .entries = {
202 .entries = {
228 .entries = {
261 .entries = {
285 .entries = {
302 .entries = {
319 .entries = {
338 .entries = {
407 .entries = {
452 .entries = {
471 .entries = {
485 .entries = {
493 * more entries, e.g.
506 .entries = {
527 .entries = {
575 if (frequency > desc->entries[i].limit) dvb_pll_configure()
587 desc->entries[i].stepsize/2) / desc->entries[i].stepsize; dvb_pll_configure()
590 buf[2] = desc->entries[i].config; dvb_pll_configure()
591 buf[3] = desc->entries[i].cb; dvb_pll_configure()
601 return (div * desc->entries[i].stepsize) - desc->iffreq; dvb_pll_configure()
/linux-4.4.14/net/bridge/netfilter/
H A Debtables.c222 base = private->entries; ebt_do_table()
438 struct ebt_entry *e = (void *)newinfo->entries + offset; ebt_verify_pointers()
447 repl->entries + offset) ebt_verify_pointers()
509 /* this checks if the previous chain has as many entries ebt_check_entry_size_and_hooks()
512 BUGPRINT("nentries does not equal the nr of entries " ebt_check_entry_size_and_hooks()
561 * entries is a jump to the beginning of a new chain.
836 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ translate_table()
851 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) { translate_table()
868 i = 0; /* holds the expected nr. of entries for the chain */ translate_table()
869 j = 0; /* holds the up to now counted entries for the chain */ translate_table()
870 k = 0; /* holds the total nr. of entries, should equal translate_table()
873 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, translate_table()
881 BUGPRINT("nentries does not equal the nr of entries in the " translate_table()
915 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
929 cl_s, udc_cnt, i, newinfo->entries)) {
935 - the nr of entries in each chain is right
946 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
949 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1052 EBT_ENTRY_ITERATE(table->entries, table->entries_size, do_replace_finish()
1055 vfree(table->entries); do_replace_finish()
1072 audit_log_format(ab, "table=%s family=%u entries=%u", do_replace_finish()
1083 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, do_replace_finish()
1133 newinfo->entries = vmalloc(tmp.entries_size); do_replace()
1134 if (!newinfo->entries) { do_replace()
1139 newinfo->entries, tmp.entries, tmp.entries_size) != 0) { do_replace()
1140 BUGPRINT("Couldn't copy entries from userspace\n"); do_replace()
1149 vfree(newinfo->entries); do_replace()
1165 repl->entries == NULL || repl->entries_size == 0 || ebt_register_table()
1188 memcpy(p, repl->entries, repl->entries_size); ebt_register_table()
1189 newinfo->entries = p; ebt_register_table()
1197 /* fill in newinfo and parse the entries */ ebt_register_table()
1204 ((char *)repl->hook_entry[i] - repl->entries); ebt_register_table()
1245 vfree(newinfo->entries); ebt_register_table()
1265 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, ebt_unregister_table()
1269 vfree(table->private->entries); ebt_unregister_table()
1436 char *entries; copy_everything_to_user() local
1441 entries = t->private->entries; copy_everything_to_user()
1446 entries = t->table->entries; copy_everything_to_user()
1472 if (copy_to_user(tmp.entries, entries, entries_size)) { copy_everything_to_user()
1473 BUGPRINT("Couldn't copy entries to userspace\n"); copy_everything_to_user()
1477 return EBT_ENTRY_ITERATE(entries, entries_size, copy_everything_to_user()
1478 ebt_make_names, entries, tmp.entries); copy_everything_to_user()
1573 compat_uptr_t entries; member in struct:compat_ebt_replace
1788 const void *entries = info->entries; compat_table_info() local
1793 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, compat_table_info()
1794 entries, newinfo); compat_table_info()
1811 tinfo.entries = t->private->entries; compat_copy_everything_to_user()
1816 tinfo.entries = t->table->entries; compat_copy_everything_to_user()
1848 pos = compat_ptr(tmp.entries); compat_copy_everything_to_user()
1849 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size, compat_copy_everything_to_user()
2173 repl->entries = compat_ptr(tmp.entries); compat_copy_ebt_replace_from_user()
2204 newinfo->entries = vmalloc(tmp.entries_size); compat_do_replace()
2205 if (!newinfo->entries) { compat_do_replace()
2210 newinfo->entries, tmp.entries, tmp.entries_size) != 0) { compat_do_replace()
2215 entries_tmp = newinfo->entries; compat_do_replace()
2229 newinfo->entries = vmalloc(size64); compat_do_replace()
2230 if (!newinfo->entries) { compat_do_replace()
2237 state.buf_kern_start = newinfo->entries; compat_do_replace()
2251 delta = usrptr - tmp.entries; compat_do_replace()
2264 vfree(newinfo->entries); compat_do_replace()
/linux-4.4.14/fs/xfs/
H A Dxfs_attr_list.c62 * Copy out entries of shortform attribute lists for attr_list().
65 * we have to calculate each entries' hashvalue and sort them before
138 * Scan the attribute list for the rest of the entries, storing xfs_attr_shortform_list()
166 * Sort the entries on hash then entno. xfs_attr_shortform_list()
192 * Loop putting entries into the user buffer. xfs_attr_shortform_list()
249 struct xfs_attr_leaf_entry *entries; xfs_attr_node_list() local
264 entries = xfs_attr3_leaf_entryp(leaf); xfs_attr_node_list()
266 entries[leafhdr.count - 1].hashval)) { xfs_attr_node_list()
271 entries[0].hashval)) { xfs_attr_node_list()
361 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
371 struct xfs_attr_leaf_entry *entries; xfs_attr3_leaf_list_int() local
381 entries = xfs_attr3_leaf_entryp(leaf); xfs_attr3_leaf_list_int()
390 entry = &entries[0]; xfs_attr3_leaf_list_int()
409 entry = &entries[0]; xfs_attr3_leaf_list_int()
425 continue; /* skip incomplete entries */ xfs_attr3_leaf_list_int()
487 * Copy out attribute entries for attr_list(), for leaf attribute lists.
568 * Only list entries in the right namespace. xfs_attr_put_listent()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/
H A Dmxms.c107 u8 entries = 0; mxms_foreach() local
124 entries = (ROM32(desc[0]) & 0x01f00000) >> 20; mxms_foreach()
133 entries = (desc[1] & 0xf0) >> 4; mxms_foreach()
141 entries = desc[1] & 0x07; mxms_foreach()
162 for (i = 0; i < entries; i++, dump += recordlen) { mxms_foreach()
174 desc += headerlen + (entries * recordlen); mxms_foreach()
/linux-4.4.14/arch/cris/arch-v10/mm/
H A Dtlb.c23 * to invalidate TLB entries.
26 * so we can make TLB entries that will never match.
33 /* invalidate all TLB entries */
41 /* the vpn of i & 0xf is so we don't write similar TLB entries flush_tlb_all()
75 /* mark the TLB entries that match the page_id as invalid. flush_tlb_mm()
113 /* invalidate those TLB entries that match both the mm context flush_tlb_page()
/linux-4.4.14/arch/cris/arch-v32/mm/
H A Dtlb.c34 * invalidate TLB entries.
37 * it's possible to make TLB entries that will never match.
43 /* Flush all TLB entries. */
54 * Mask with 0xf so similar TLB entries aren't written in the same 4-way __flush_tlb_all()
93 /* Mark the TLB entries that match the page_id as invalid. */ __flush_tlb_mm()
138 * Invalidate those TLB entries that match both the mm context and the __flush_tlb_page()
/linux-4.4.14/arch/sh/mm/
H A Dtlb-sh5.c23 cpu_data->dtlb.entries = 64; sh64_tlb_init()
30 ((cpu_data->dtlb.entries - 1) * sh64_tlb_init()
34 cpu_data->itlb.entries = 64; sh64_tlb_init()
40 ((cpu_data->itlb.entries - 1) * sh64_tlb_init()
79 * We don't do any particularly useful tracking of wired entries, sh64_put_wired_dtlb_entry()
83 * We could potentially load wired entries into a list and work on sh64_put_wired_dtlb_entry()
/linux-4.4.14/tools/usb/usbip/libsrc/
H A Dlist.h12 * manipulating whole lists rather than single entries, as
13 * sometimes we already know the next/prev entries and we can
34 * Insert a new entry between two known consecutive entries.
37 * the prev/next entries already!
63 * Delete a list entry by making the prev/next entries
67 * the prev/next entries already!
/linux-4.4.14/drivers/media/usb/pwc/
H A Dpwc-timon.h27 /* This tables contains entries for the 675/680/690 (Timon) camera, with
32 There are 6 * 4 * 4 entries:
39 1 or 2 compressed modes available; in that case entries are duplicated.
/linux-4.4.14/drivers/misc/genwqe/
H A Dcard_debugfs.c50 int entries) dbg_uidn_show()
55 for (i = 0; i < entries; i++) { dbg_uidn_show()
67 int entries; curr_dbg_uidn_show() local
70 entries = genwqe_ffdc_buff_size(cd, uid); curr_dbg_uidn_show()
71 if (entries < 0) curr_dbg_uidn_show()
74 if (entries == 0) curr_dbg_uidn_show()
77 regs = kcalloc(entries, sizeof(*regs), GFP_KERNEL); curr_dbg_uidn_show()
82 genwqe_ffdc_buff_read(cd, uid, regs, entries); curr_dbg_uidn_show()
85 dbg_uidn_show(s, regs, entries); curr_dbg_uidn_show()
115 dbg_uidn_show(s, cd->ffdc[uid].regs, cd->ffdc[uid].entries); prev_dbg_uidn_show()
156 break; /* invalid entries */ genwqe_curr_regs_show()
180 break; /* invalid entries */ genwqe_prev_regs_show()
49 dbg_uidn_show(struct seq_file *s, struct genwqe_reg *regs, int entries) dbg_uidn_show() argument
/linux-4.4.14/arch/s390/kernel/
H A Dstacktrace.c31 trace->entries[trace->nr_entries++] = addr; save_context_stack()
52 trace->entries[trace->nr_entries++] = addr; save_context_stack()
94 trace->entries[trace->nr_entries++] = ULONG_MAX; save_stack_trace_tsk()
/linux-4.4.14/arch/metag/tbx/
H A Dtbistring.c21 * ensuring that creating new entries does not interfere with reading old
22 * entries in any way.
99 /* Skip matching entries with no translation data */ __TBITransStr()
/linux-4.4.14/arch/mn10300/mm/
H A Dcache-dbg-flush-by-tag.S48 mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,e0 # total number of entries
53 or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
103 # retain valid entries in the cache
/linux-4.4.14/arch/arm/mm/
H A Dpv-fixup-asm.S31 /* Update level 2 entries covering the kernel */
43 /* Update level 2 entries for the boot data */
56 /* Update level 1 entries */
H A Dproc-arm940.S102 * There is no efficient way to flush a range of cache entries
115 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
118 bcs 2b @ entries 63 to 0
165 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
168 bcs 2b @ entries 63 to 0
187 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
190 bcs 2b @ entries 63 to 0
210 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
213 bcs 2b @ entries 63 to 0
232 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
240 bcs 2b @ entries 63 to 0
/linux-4.4.14/drivers/xen/xen-pciback/
H A Dpciback_ops.c214 struct msix_entry *entries; xen_pcibk_enable_msix() local
229 * to access the BARs where the MSI-X entries reside. xen_pcibk_enable_msix()
236 entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL); xen_pcibk_enable_msix()
237 if (entries == NULL) xen_pcibk_enable_msix()
241 entries[i].entry = op->msix_entries[i].entry; xen_pcibk_enable_msix()
242 entries[i].vector = op->msix_entries[i].vector; xen_pcibk_enable_msix()
245 result = pci_enable_msix_exact(dev, entries, op->value); xen_pcibk_enable_msix()
248 op->msix_entries[i].entry = entries[i].entry; xen_pcibk_enable_msix()
249 if (entries[i].vector) { xen_pcibk_enable_msix()
251 xen_pirq_from_irq(entries[i].vector); xen_pcibk_enable_msix()
263 kfree(entries); xen_pcibk_enable_msix()
/linux-4.4.14/drivers/scsi/arm/
H A Dmsgqueue.c59 msgq->free = &msgq->entries[0]; msgqueue_initialise()
62 msgq->entries[i].next = &msgq->entries[i + 1]; msgqueue_initialise()
64 msgq->entries[NR_MESSAGES - 1].next = NULL; msgqueue_initialise()
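msgqueue_initialise() above threads a static entries array into a singly linked free list at init time: each element points at the next, and the last is terminated with NULL. The same chaining, self-contained:

    #include <stdio.h>

    #define NR_MESSAGES 4

    struct msgqueue_entry { int msg; struct msgqueue_entry *next; };

    struct msgqueue {
            struct msgqueue_entry *free;
            struct msgqueue_entry entries[NR_MESSAGES];
    };

    static void msgqueue_initialise(struct msgqueue *q)
    {
            q->free = &q->entries[0];
            for (int i = 0; i < NR_MESSAGES - 1; i++)
                    q->entries[i].next = &q->entries[i + 1];
            q->entries[NR_MESSAGES - 1].next = NULL;    /* end of free list */
    }

    int main(void)
    {
            struct msgqueue q;
            msgqueue_initialise(&q);

            int n = 0;
            for (struct msgqueue_entry *e = q.free; e; e = e->next)
                    n++;
            printf("free entries: %d\n", n);
            return 0;
    }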
/linux-4.4.14/drivers/md/persistent-data/
H A Ddm-space-map-metadata.h17 * We have one block of index, which can hold 255 index entries. Each
H A Ddm-btree.h128 * overwrote. Useful if you're keeping track of the number of entries in a
155 * Returns < 0 on failure. Otherwise the number of key entries that have
156 * been filled out. Remember trees can have zero entries, and as such have
163 * Returns < 0 on failure. Otherwise the number of key entries that have
164 * been filled out. Remember trees can have zero entries, and as such have
/linux-4.4.14/drivers/net/wireless/ath/ath6kl/
H A Dtrace.h121 unsigned int entries, struct hif_scatter_item *list),
123 TP_ARGS(addr, flags, total_len, entries, list),
129 __field(unsigned int, entries)
131 __dynamic_array(unsigned int, len_array, entries)
142 __entry->entries = entries;
152 for (i = 0; i < entries; i++) {
164 "%s addr 0x%x flags 0x%x entries %d total_len %zd\n",
168 __entry->entries,
/linux-4.4.14/arch/unicore32/include/uapi/asm/
H A Dsigcontext.h18 * before the signal handler was invoked. Note: only add new entries
/linux-4.4.14/arch/xtensa/include/asm/
H A Dsysmem.h24 * between adjacent bank entries.
/linux-4.4.14/fs/nfsd/
H A Dnfscache.c26 * of entries, then this should be the average number of entries per bucket.
38 /* max number of entries allowed in the cache */
50 /* total number of entries */
91 * ...with a hard cap of 256k entries. In the worst case, each entry will be
234 * Don't free entries attached to calls that are still prune_bucket()
249 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
250 * Also prune the oldest ones when the total exceeds the max number of entries.
349 unsigned int entries = 0; nfsd_cache_search() local
352 ++entries; list_for_each_entry()
360 if (entries > longest_chain) {
361 longest_chain = entries;
363 } else if (entries == longest_chain) {
502 * If we should start to use different types of cache entries tailored
592 seq_printf(m, "max entries: %u\n", max_drc_entries); nfsd_reply_cache_stats_show()
593 seq_printf(m, "num entries: %u\n", nfsd_reply_cache_stats_show()
H A Dcache.h60 * attrstat replies. Using cache entries with fixed length instead
74 /* Cache entries expire after this time period */
/linux-4.4.14/fs/nilfs2/
H A Dalloc.h33 * nilfs_palloc_entries_per_group - get the number of entries per group
36 * The number of entries per group is defined by the number of bits
58 * @pr_entry_bh: buffer head of the buffer containing translation entries
97 * @prev_entry: translation entries cache
/linux-4.4.14/net/appletalk/
H A Dsysctl_net_atalk.c6 * Dynamic registration, added aarp entries. (5/30/97 Chris Horn)
/linux-4.4.14/arch/sh/boards/mach-se/7751/
H A Dirq.c19 /* Add additional entries here as drivers are added and tested. */
/linux-4.4.14/arch/frv/include/uapi/asm/
H A Dsigcontext.h18 * before the signal handler was invoked. Note: only add new entries
/linux-4.4.14/arch/arm/include/uapi/asm/
H A Dsigcontext.h6 * before the signal handler was invoked. Note: only add new entries
/linux-4.4.14/fs/squashfs/
H A Dcache.c74 for (i = cache->curr_blk, n = 0; n < cache->entries; n++) { squashfs_cache_get()
79 i = (i + 1) % cache->entries; squashfs_cache_get()
82 if (n == cache->entries) { squashfs_cache_get()
84 * Block not in cache, if all cache entries are used squashfs_cache_get()
102 for (n = 0; n < cache->entries; n++) { squashfs_cache_get()
105 i = (i + 1) % cache->entries; squashfs_cache_get()
108 cache->next_blk = (i + 1) % cache->entries; squashfs_cache_get()
217 for (i = 0; i < cache->entries; i++) { squashfs_cache_delete()
232 * Initialise cache allocating the specified number of entries, each of
236 struct squashfs_cache *squashfs_cache_init(char *name, int entries, squashfs_cache_init() argument
247 cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); squashfs_cache_init()
255 cache->unused = entries; squashfs_cache_init()
256 cache->entries = entries; squashfs_cache_init()
265 for (i = 0; i < entries; i++) { squashfs_cache_init()
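squashfs_cache_get() scans its fixed set of slots starting at curr_blk and wraps with "% cache->entries", remembering where the last hit landed. That search loop in isolation, against a toy cache whose slot contents are reduced to a block number:

    #include <stdio.h>

    #define CACHE_ENTRIES 8

    struct toy_cache {
            int curr_blk;
            long block[CACHE_ENTRIES];      /* -1 = empty slot */
    };

    /* Return slot index holding 'block', or -1; start at curr_blk and wrap. */
    static int cache_lookup(struct toy_cache *c, long block)
    {
            int i = c->curr_blk;
            for (int n = 0; n < CACHE_ENTRIES; n++) {
                    if (c->block[i] == block) {
                            c->curr_blk = i;    /* remember the hit position */
                            return i;
                    }
                    i = (i + 1) % CACHE_ENTRIES;
            }
            return -1;
    }

    int main(void)
    {
            struct toy_cache c = { .curr_blk = 5 };
            for (int i = 0; i < CACHE_ENTRIES; i++)
                    c.block[i] = -1;
            c.block[2] = 4096;

            printf("block 4096 -> slot %d\n", cache_lookup(&c, 4096));
            printf("block 8192 -> slot %d\n", cache_lookup(&c, 8192));
            return 0;
    }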
/linux-4.4.14/drivers/infiniband/hw/mlx5/
H A Dcq.c613 int entries, struct mlx5_create_cq_mbox_in **cqb, create_cq_user()
641 entries * ucmd.cqe_size, create_cq_user()
656 ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); create_cq_user()
699 int entries, int cqe_size, create_cq_kernel()
713 err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size); create_cq_kernel()
751 int entries = attr->cqe; mlx5_ib_create_cq() local
766 if (entries < 0) mlx5_ib_create_cq()
769 entries = roundup_pow_of_two(entries + 1); mlx5_ib_create_cq()
770 if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) mlx5_ib_create_cq()
777 cq->ibcq.cqe = entries - 1; mlx5_ib_create_cq()
784 err = create_cq_user(dev, udata, context, cq, entries, mlx5_ib_create_cq()
791 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, mlx5_ib_create_cq()
799 cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index); mlx5_ib_create_cq()
881 * adds new entries after this loop -- the QP we're worried __mlx5_ib_cq_clean()
882 * about is already in RESET, so the new entries won't come __mlx5_ib_cq_clean()
889 /* Now sweep backwards through the CQ, removing CQ entries __mlx5_ib_cq_clean()
890 * that match our QP by copying older entries on top of them. __mlx5_ib_cq_clean()
959 int entries, struct ib_udata *udata, int *npas, resize_user()
975 umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, resize_user()
997 int entries, int cqe_size) resize_kernel()
1005 err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size); resize_kernel()
1080 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) mlx5_ib_resize_cq() argument
1097 if (entries < 1) mlx5_ib_resize_cq()
1100 entries = roundup_pow_of_two(entries + 1); mlx5_ib_resize_cq()
1101 if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) mlx5_ib_resize_cq()
1104 if (entries == ibcq->cqe + 1) mlx5_ib_resize_cq()
1109 err = resize_user(dev, cq, entries, udata, &npas, &page_shift, mlx5_ib_resize_cq()
1113 err = resize_kernel(dev, cq, entries, cqe_size); mlx5_ib_resize_cq()
1142 in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24); mlx5_ib_resize_cq()
1151 cq->ibcq.cqe = entries - 1; mlx5_ib_resize_cq()
1170 cq->ibcq.cqe = entries - 1; mlx5_ib_resize_cq()
611 create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, struct ib_ucontext *context, struct mlx5_ib_cq *cq, int entries, struct mlx5_create_cq_mbox_in **cqb, int *cqe_size, int *index, int *inlen) create_cq_user() argument
698 create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, int entries, int cqe_size, struct mlx5_create_cq_mbox_in **cqb, int *index, int *inlen) create_cq_kernel() argument
958 resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, int entries, struct ib_udata *udata, int *npas, int *page_shift, int *cqe_size) resize_user() argument
996 resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, int entries, int cqe_size) resize_kernel() argument
/linux-4.4.14/fs/xfs/libxfs/
H A Dxfs_da_format.h80 __be16 __count; /* count of active entries */
86 __be16 __count; /* count of active entries */
247 __uint8_t count; /* count of entries */
258 * number for version 3 directory entries.
307 * As all the entries are variable size structures the accessors below should
441 * As all the entries are variable size structures the accessors below should
457 __be16 count; /* count of entries */
458 __be16 stale; /* count of stale entries */
463 __be16 count; /* count of entries */
464 __be16 stale; /* count of stale entries */
496 xfs_dir2_leaf_entry_t __ents[]; /* entries */
501 struct xfs_dir2_leaf_entry __ents[]; /* entries */
528 __be32 nvalid; /* count of valid entries */
529 __be32 nused; /* count of used entries */
535 /* unused entries are -1 */
541 __be32 nvalid; /* count of valid entries */
542 __be32 nused; /* count of used entries */
549 /* unused entries are -1 */
589 * As all the entries are variable size structures the accessors below should
594 __be32 count; /* count of leaf entries */
595 __be32 stale; /* count of stale lf entries */
599 * Pointer to the leaf entries embedded in a data block (1-block format)
618 * bottom but are not packed. The freemap contains run-length-encoded entries
682 xfs_attr_leaf_entry_t entries[1]; /* sorted on key, not name */ member in struct:xfs_attr_leafblock
685 * leaf entries, growing from the bottom up. The variables are never
687 * accesses to the 'entries' array above index 0 so don't do that.
714 struct xfs_attr_leaf_entry entries[1]; member in struct:xfs_attr3_leafblock
718 * leaf entries, growing from the bottom up. The variables are never
783 * Alignment for namelist and valuelist entries (since they are mixed
800 return &((struct xfs_attr3_leafblock *)leafp)->entries[0]; xfs_attr3_leaf_entryp()
801 return &leafp->entries[0]; xfs_attr3_leaf_entryp()
810 struct xfs_attr_leaf_entry *entries = xfs_attr3_leaf_entryp(leafp); xfs_attr3_leaf_name() local
812 return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)]; xfs_attr3_leaf_name()
/linux-4.4.14/drivers/md/bcache/
H A Djournal.h11 * Journal entries contain a list of keys, ordered by the time they were
38 * journal entries
47 * might contain keys for many journal entries - we handle this by making sure
49 * entries it has keys for.
54 * journal entries; from that and the current journal sequence number we compute
68 * space to write to, or we could have too many open journal entries and run out
75 * nodes that are pinning the oldest journal entries first.
79 * Only used for holding the journal entries we read in btree_journal_read()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/bios/
H A Dbit.c31 u8 entries = nvbios_rd08(bios, bios->bit_offset + 10); bit_entry() local
33 while (entries--) { bit_entry()
/linux-4.4.14/drivers/crypto/qce/
H A Dcipher.h33 * @src_nents: source entries
34 * @dst_nents: destination entries
/linux-4.4.14/drivers/acpi/apei/
H A Derst.c378 ERST_TAB_ENTRY(erst_tab), erst_tab->entries); erst_exec_ctx_init()
436 u64 *entries; member in struct:erst_record_id_cache
485 u64 *entries; __erst_record_id_cache_add_one() local
506 entries = erst_record_id_cache.entries; __erst_record_id_cache_add_one()
508 if (entries[i] == id) __erst_record_id_cache_add_one()
526 alloc_size = new_size * sizeof(entries[0]); __erst_record_id_cache_add_one()
533 memcpy(new_entries, entries, __erst_record_id_cache_add_one()
534 erst_record_id_cache.len * sizeof(entries[0])); __erst_record_id_cache_add_one()
536 kfree(entries); __erst_record_id_cache_add_one()
538 vfree(entries); __erst_record_id_cache_add_one()
539 erst_record_id_cache.entries = entries = new_entries; __erst_record_id_cache_add_one()
542 entries[i] = id; __erst_record_id_cache_add_one()
556 u64 *entries; erst_get_record_id_next() local
566 entries = erst_record_id_cache.entries; erst_get_record_id_next()
568 if (entries[*pos] != APEI_ERST_INVALID_RECORD_ID) erst_get_record_id_next()
572 *record_id = entries[*pos]; erst_get_record_id_next()
583 *record_id = erst_record_id_cache.entries[*pos]; erst_get_record_id_next()
601 u64 *entries; __erst_record_id_cache_compact() local
606 entries = erst_record_id_cache.entries; __erst_record_id_cache_compact()
608 if (entries[i] == APEI_ERST_INVALID_RECORD_ID) __erst_record_id_cache_compact()
611 entries[wpos] = entries[i]; __erst_record_id_cache_compact()
876 u64 *entries; erst_clear() local
892 entries = erst_record_id_cache.entries; erst_clear()
894 if (entries[i] == record_id) erst_clear()
895 entries[i] = APEI_ERST_INVALID_RECORD_ID; erst_clear()
920 if (erst_tab->entries != erst_check_table()
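__erst_record_id_cache_add_one() above grows its id array on demand: allocate a larger buffer, copy the old entries across, free the old buffer, then append. A userspace version of that grow-and-append step (the doubling policy and initial size of 16 are assumptions; the kernel code picks kmalloc or vmalloc by size):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    struct id_cache { uint64_t *entries; unsigned len, size; };

    static int cache_add(struct id_cache *c, uint64_t id)
    {
            for (unsigned i = 0; i < c->len; i++)
                    if (c->entries[i] == id)
                            return 0;           /* already cached */

            if (c->len >= c->size) {
                    unsigned new_size = c->size ? 2 * c->size : 16;
                    uint64_t *n = malloc(new_size * sizeof(*n));
                    if (!n)
                            return -1;
                    if (c->len)
                            memcpy(n, c->entries, c->len * sizeof(*n));
                    free(c->entries);           /* kfree()/vfree() in-kernel */
                    c->entries = n;
                    c->size = new_size;
            }
            c->entries[c->len++] = id;
            return 0;
    }

    int main(void)
    {
            struct id_cache c = { 0 };
            for (uint64_t id = 100; id < 140; id++)
                    cache_add(&c, id);
            printf("len=%u size=%u first=%llu\n", c.len, c.size,
                   (unsigned long long)c.entries[0]);
            free(c.entries);
            return 0;
    }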
/linux-4.4.14/arch/um/kernel/
H A Dstacktrace.c56 trace->entries[trace->nr_entries++] = address; save_addr()
67 trace->entries[trace->nr_entries++] = ULONG_MAX; __save_stack_trace()
/linux-4.4.14/arch/x86/um/
H A Dldt.c70 if (copy_to_user(ptr, ldt->u.entries, size)) read_ldt()
158 memcpy(&entry0, ldt->u.entries, write_ldt()
172 memcpy(ldt->u.pages[0]+1, ldt->u.entries+1, write_ldt()
182 ldt_p = ldt->u.entries + ldt_info.entry_number; write_ldt()
312 * inherited from the host. All ldt-entries found init_new_ldt()
336 memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries, init_new_ldt()
337 sizeof(new_mm->arch.ldt.u.entries)); init_new_ldt()
/linux-4.4.14/net/802/
H A Dp8022.c13 * Unlike the 802.3 datalink we have a list of 802.2 entries as
15 * short (3 or 4 entries at most). The current demux assumes this.
/linux-4.4.14/include/uapi/linux/netfilter_bridge/
H A Debtables.h45 /* total size of the entries */
53 char __user *entries; member in struct:ebt_replace
61 /* total size of the entries */
69 char *entries; member in struct:ebt_replace_kernel
83 /* nr. of entries */
92 * ebt_entries struct when traversing the entries from start to end.
243 #define EBT_ENTRY_ITERATE(entries, size, fn, args...) \
250 __entry = (void *)(entries) + __i; \
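EBT_ENTRY_ITERATE (like the XT_ENTRY_ITERATE cousins in the arp/ip/ip6 headers earlier) walks a packed blob of variable-size entries by byte offset rather than array index, stopping when the callback returns nonzero. The same walk written out as a plain loop, with a simplified stand-in entry layout:

    #include <stdio.h>
    #include <string.h>

    struct blob_entry { unsigned next_offset; char name[8]; };

    /* Walk a packed buffer of variable-size entries, calling fn on each. */
    static int entry_iterate(void *base, unsigned size,
                             int (*fn)(struct blob_entry *))
    {
            for (unsigned off = 0; off < size; ) {
                    struct blob_entry *e = (void *)((char *)base + off);
                    int ret = fn(e);
                    if (ret)
                            return ret;     /* nonzero stops the walk */
                    off += e->next_offset;
            }
            return 0;
    }

    static int print_entry(struct blob_entry *e)
    {
            printf("%s (+%u)\n", e->name, e->next_offset);
            return 0;
    }

    int main(void)
    {
            char buf[3 * sizeof(struct blob_entry)];
            for (int i = 0; i < 3; i++) {
                    struct blob_entry e = { .next_offset = sizeof(e) };
                    snprintf(e.name, sizeof(e.name), "rule%d", i);
                    memcpy(buf + i * sizeof(e), &e, sizeof(e));
            }
            return entry_iterate(buf, sizeof(buf), print_entry);
    }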
/linux-4.4.14/arch/powerpc/include/asm/
H A Dmmu-40x.h11 * TLB entries are defined by a "high" tag portion and a "low" data
14 * TLB entries are managed entirely under software control by reading,
H A Dpte-fsl-booke.h14 entries use the top 29 bits.
/linux-4.4.14/arch/mips/include/asm/
H A Dtlbflush.h9 * - flush_tlb_all() flushes all processes TLB entries
10 * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
/linux-4.4.14/arch/sh/include/asm/
H A Dtlb_64.h24 * for_each_dtlb_entry - Iterate over free (non-wired) DTLB entries
34 * for_each_itlb_entry - Iterate over free (non-wired) ITLB entries
H A Dfixmap.h38 * TLB entries of such buffers will not be flushed across
50 * The FIX_CMAP entries are used by kmap_coherent() to get virtual
66 * FIX_IOREMAP entries are useful for mapping physical address
/linux-4.4.14/arch/sparc/kernel/
H A Dstacktrace.c56 trace->entries[trace->nr_entries++] = pc; __save_stack_trace()
64 trace->entries[trace->nr_entries++] = pc; __save_stack_trace()
/linux-4.4.14/arch/mips/boot/
H A Decoff.h26 unsigned short s_nreloc; /* number of relocation entries */
27 unsigned short s_nlnno; /* number of gp histogram entries */
/linux-4.4.14/arch/nios2/include/asm/
H A Dtlbflush.h27 * - flush_tlb_all() flushes all processes TLB entries
28 * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
/linux-4.4.14/drivers/staging/rdma/ipath/
H A Dipath_cq.c119 * ipath_poll_cq - poll for work completion entries
121 * @num_entries: the maximum number of entries to return
124 * Returns the number of completion entries polled.
173 * and poll for all pending entries. If a new completion entry send_complete()
205 int entries = attr->cqe; ipath_create_cq() local
215 if (entries < 1 || entries > ib_ipath_max_cqes) { ipath_create_cq()
228 * Allocate the completion queue entries and head/tail pointers. ipath_create_cq()
232 * numbers of entries. ipath_create_cq()
236 sz += sizeof(struct ib_uverbs_wc) * (entries + 1); ipath_create_cq()
238 sz += sizeof(struct ib_wc) * (entries + 1); ipath_create_cq()
285 * The number of entries should be >= the number requested or return ipath_create_cq()
288 cq->ibcq.cqe = entries; ipath_create_cq()
391 * Need to use vmalloc() if we want to support large #s of entries. ipath_resize_cq()
/linux-4.4.14/drivers/staging/rdma/hfi1/
H A Dcq.c142 * hfi1_poll_cq - poll for work completion entries
144 * @num_entries: the maximum number of entries to return
147 * Returns the number of completion entries polled.
196 * and poll for all pending entries. If a new completion entry send_complete()
242 unsigned int entries = attr->cqe; hfi1_create_cq() local
247 if (entries < 1 || entries > hfi1_max_cqes) hfi1_create_cq()
256 * Allocate the completion queue entries and head/tail pointers. hfi1_create_cq()
260 * numbers of entries. hfi1_create_cq()
264 sz += sizeof(struct ib_uverbs_wc) * (entries + 1); hfi1_create_cq()
266 sz += sizeof(struct ib_wc) * (entries + 1); hfi1_create_cq()
313 * The number of entries should be >= the number requested or return hfi1_create_cq()
317 cq->ibcq.cqe = entries; hfi1_create_cq()
420 * Need to use vmalloc() if we want to support large #s of entries. hfi1_resize_cq()
/linux-4.4.14/drivers/infiniband/hw/qib/
H A Dqib_cq.c126 * qib_poll_cq - poll for work completion entries
128 * @num_entries: the maximum number of entries to return
131 * Returns the number of completion entries polled.
180 * and poll for all pending entries. If a new completion entry send_complete()
220 int entries = attr->cqe; qib_create_cq() local
230 if (entries < 1 || entries > ib_qib_max_cqes) { qib_create_cq()
243 * Allocate the completion queue entries and head/tail pointers. qib_create_cq()
247 * numbers of entries. qib_create_cq()
251 sz += sizeof(struct ib_uverbs_wc) * (entries + 1); qib_create_cq()
253 sz += sizeof(struct ib_wc) * (entries + 1); qib_create_cq()
300 * The number of entries should be >= the number requested or return qib_create_cq()
304 cq->ibcq.cqe = entries; qib_create_cq()
407 * Need to use vmalloc() if we want to support large #s of entries. qib_resize_cq()
/linux-4.4.14/drivers/staging/lustre/lustre/fld/
H A Dfld_internal.h76 * fld cache entries are sorted on range->lsr_start field. */
92 * Preferred number of cached entries */
96 * Current number of cached entries. Protected by \a fci_lock */
100 * LRU list fld entries. */
104 * sorted fld entries. */
/linux-4.4.14/drivers/sh/intc/
H A Dvirq.c205 struct intc_subgroup_entry *entries[32]; intc_subgroup_map() local
214 (void ***)entries, 0, ARRAY_SIZE(entries), intc_subgroup_map()
221 entry = radix_tree_deref_slot((void **)entries[i]); intc_subgroup_map()
257 radix_tree_replace_slot((void **)entries[i], intc_subgroup_map()
/linux-4.4.14/drivers/pinctrl/qcom/
H A Dpinctrl-msm.h22 * @ngroups: Number of entries in @groups.
34 * @npins: Number of entries in @pins.
103 * @npins: The number of entries in @pins.
105 * @nfunctions: The number of entries in @functions.
107 * @ngroups: The number of entries in @groups.
/linux-4.4.14/arch/mips/vdso/
H A Dgenvdso.h65 * Ensure the GOT has no entries other than the standard 2, for the same patch_vdso()
67 * The standard two entries are: patch_vdso()
77 * This member holds the number of local GOT entries. patch_vdso()
83 * This member holds the number of entries in the patch_vdso()
104 "%s: '%s' contains unexpected GOT entries\n", patch_vdso()
/linux-4.4.14/arch/arm/kernel/
H A Dstacktrace.c84 trace->entries[trace->nr_entries++] = addr; save_trace()
103 trace->entries[trace->nr_entries++] = regs->ARM_pc; save_trace()
128 trace->entries[trace->nr_entries++] = ULONG_MAX; __save_stack_trace()
147 trace->entries[trace->nr_entries++] = ULONG_MAX; __save_stack_trace()
166 trace->entries[trace->nr_entries++] = ULONG_MAX; save_stack_trace_regs()
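The ULONG_MAX stores above follow the stack-trace convention shared across architectures (the sparc and x86 hits earlier in this list do the same): frame addresses are appended while space remains, and a ULONG_MAX sentinel terminates the array when it is not full. A sketch of that convention, with types loosely mirroring <linux/stacktrace.h>:

#include <limits.h>

struct stack_trace {
	unsigned int nr_entries, max_entries;
	unsigned long *entries;
	unsigned int skip;	/* frames to drop from the top */
};

static void save_addr(struct stack_trace *trace, unsigned long addr)
{
	if (trace->skip > 0) {
		trace->skip--;
		return;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static void terminate_trace(struct stack_trace *trace)
{
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
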
/linux-4.4.14/drivers/infiniband/hw/mlx4/
H A Dcq.c175 int entries = attr->cqe; mlx4_ib_create_cq() local
182 if (entries < 1 || entries > dev->dev->caps.max_cqes) mlx4_ib_create_cq()
192 entries = roundup_pow_of_two(entries + 1); mlx4_ib_create_cq()
193 cq->ibcq.cqe = entries - 1; mlx4_ib_create_cq()
211 ucmd.buf_addr, entries); mlx4_ib_create_cq()
231 err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries); mlx4_ib_create_cq()
241 err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, mlx4_ib_create_cq()
284 int entries) mlx4_alloc_resize_buf()
295 err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries); mlx4_alloc_resize_buf()
302 cq->resize_buf->cqe = entries - 1; mlx4_alloc_resize_buf()
308 int entries, struct ib_udata *udata) mlx4_alloc_resize_umem()
324 &cq->resize_umem, ucmd.buf_addr, entries); mlx4_alloc_resize_umem()
331 cq->resize_buf->cqe = entries - 1; mlx4_alloc_resize_umem()
372 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) mlx4_ib_resize_cq() argument
381 if (entries < 1 || entries > dev->dev->caps.max_cqes) { mlx4_ib_resize_cq()
386 entries = roundup_pow_of_two(entries + 1); mlx4_ib_resize_cq()
387 if (entries == ibcq->cqe + 1) { mlx4_ib_resize_cq()
392 if (entries > dev->dev->caps.max_cqes + 1) { mlx4_ib_resize_cq()
398 err = mlx4_alloc_resize_umem(dev, cq, entries, udata); mlx4_ib_resize_cq()
404 if (entries < outst_cqe + 1) { mlx4_ib_resize_cq()
409 err = mlx4_alloc_resize_buf(dev, cq, entries); mlx4_ib_resize_cq()
416 err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt); mlx4_ib_resize_cq()
945 * adds new entries after this loop -- the QP we're worried __mlx4_ib_cq_clean()
946 * about is already in RESET, so the new entries won't come __mlx4_ib_cq_clean()
954 * Now sweep backwards through the CQ, removing CQ entries __mlx4_ib_cq_clean()
955 * that match our QP by copying older entries on top of them. __mlx4_ib_cq_clean()
283 mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq, int entries) mlx4_alloc_resize_buf() argument
307 mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq, int entries, struct ib_udata *udata) mlx4_alloc_resize_umem() argument
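The roundup_pow_of_two(entries + 1) / entries - 1 pair above reflects mlx4 hardware wanting a power-of-two ring with one slot reserved, so the driver advertises one fewer usable CQE than it allocates. A plain C stand-in for the arithmetic (roundup_pow_of_two_ul is a hypothetical substitute for the kernel helper):

static unsigned int roundup_pow_of_two_ul(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

/* e.g. a request for 100 CQEs allocates a 128-slot ring and
 * reports 127 usable entries back through ibcq.cqe */
static unsigned int cq_usable_entries(unsigned int requested)
{
	return roundup_pow_of_two_ul(requested + 1) - 1;
}
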
/linux-4.4.14/drivers/gpu/drm/amd/include/
H A Dpptable.h486 UCHAR ucNumEntries; // Number of entries.
487 ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. member in struct:_ATOM_PPLIB_Clock_Voltage_Dependency_Table
502 UCHAR ucNumEntries; // Number of entries.
503 ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. member in struct:_ATOM_PPLIB_Clock_Voltage_Limit_Table
526 UCHAR ucNumEntries; // Number of entries.
527 ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries. member in struct:_ATOM_PPLIB_CAC_Leakage_Table
541 UCHAR ucNumEntries; // Number of entries.
542 ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries. member in struct:_ATOM_PPLIB_PhaseSheddingLimits_Table
554 VCEClockInfo entries[1]; member in struct:_VCEClockInfoArray
566 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1]; member in struct:_ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
578 ATOM_PPLIB_VCE_State_Record entries[1]; member in struct:_ATOM_PPLIB_VCE_State_Table
600 UVDClockInfo entries[1]; member in struct:_UVDClockInfoArray
612 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1]; member in struct:_ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
631 ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1]; member in struct:_ATOM_PPLIB_SAMClk_Voltage_Limit_Table
649 ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1]; member in struct:_ATOM_PPLIB_ACPClk_Voltage_Limit_Table
/linux-4.4.14/drivers/misc/mic/host/
H A Dmic_smpt.c58 /* Total Cumulative system memory accessible by MIC across all SMPT entries */ mic_max_system_memory()
79 int entries, struct mic_device *mdev) mic_add_smpt_entry()
84 for (i = spt; i < spt + entries; i++, mic_add_smpt_entry()
100 int entries, s64 *ref, size_t size) mic_smpt_op()
112 /* find existing entries */ mic_smpt_op()
117 } else if (ae) /* cannot find contiguous entries */ mic_smpt_op()
120 if (ae == entries) mic_smpt_op()
127 if (ae == entries) mic_smpt_op()
136 spt = i - entries + 1; mic_smpt_op()
138 mic_add_smpt_entry(spt, ref, dma_addr, entries, mdev); mic_smpt_op()
146 * Returns number of smpt entries needed for dma_addr to dma_addr + size
147 * also returns the reference count array for each of those entries
277 /* Get number of smpt entries to be mapped, ref count array */ mic_unmap()
78 mic_add_smpt_entry(int spt, s64 *ref, u64 addr, int entries, struct mic_device *mdev) mic_add_smpt_entry() argument
99 mic_smpt_op(struct mic_device *mdev, u64 dma_addr, int entries, s64 *ref, size_t size) mic_smpt_op() argument
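mic_smpt_op() above searches the SMPT for a run of contiguous free entries, resetting its counter at every in-use slot; spt = i - entries + 1 then recovers the start of the run. A sketch of that scan, assuming a hypothetical per-entry reference count array:

static int find_contig_free(const long *ref, int table_size, int entries)
{
	int i, ae = 0;	/* ae: free entries seen in a row */

	for (i = 0; i < table_size; i++) {
		ae = ref[i] ? 0 : ae + 1;	/* a used slot resets the run */
		if (ae == entries)
			return i - entries + 1;	/* first slot of the run */
	}
	return -1;	/* no contiguous window large enough */
}
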
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
H A Dport.c62 table->entries[i] = 0; mlx4_init_mac_table()
75 table->entries[i] = 0; mlx4_init_vlan_table()
97 if (index < 0 || index >= table->max || !table->entries[index]) { validate_index()
112 (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) find_index()
120 __be64 *entries) mlx4_set_port_mac_table()
130 memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE); mlx4_set_port_mac_table()
152 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { mlx4_find_cached_mac()
181 (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { __mlx4_register_mac()
192 /* No free mac entries */ __mlx4_register_mac()
198 table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID); __mlx4_register_mac()
200 err = mlx4_set_port_mac_table(dev, port, table->entries); __mlx4_register_mac()
204 table->entries[free] = 0; __mlx4_register_mac()
276 table->entries[index] = 0; __mlx4_unregister_mac()
277 mlx4_set_port_mac_table(dev, port, table->entries); __mlx4_unregister_mac()
322 table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID); __mlx4_replace_mac()
324 err = mlx4_set_port_mac_table(dev, port, table->entries); __mlx4_replace_mac()
328 table->entries[index] = 0; __mlx4_replace_mac()
337 __be32 *entries) mlx4_set_port_vlan_table()
347 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); mlx4_set_port_vlan_table()
366 be32_to_cpu(table->entries[i])))) { mlx4_find_cached_vlan()
387 /* No free vlan entries */ __mlx4_register_vlan()
400 be32_to_cpu(table->entries[i])))) { __mlx4_register_vlan()
415 table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID); __mlx4_register_vlan()
417 err = mlx4_set_port_vlan_table(dev, port, table->entries); __mlx4_register_vlan()
421 table->entries[free] = 0; __mlx4_register_vlan()
475 table->entries[index] = 0; __mlx4_unregister_vlan()
476 mlx4_set_port_vlan_table(dev, port, table->entries); __mlx4_unregister_vlan()
770 /* change to MULTIPLE entries: number of guest's gids mlx4_common_set_port()
795 * entries in the port GID table mlx4_common_set_port()
119 mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, __be64 *entries) mlx4_set_port_mac_table() argument
336 mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, __be32 *entries) mlx4_set_port_vlan_table() argument
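__mlx4_register_mac() above shows a claim-then-rollback pattern: the driver writes the new MAC into its cached copy of the table, pushes the whole table to firmware, and clears the cached slot again if the push fails. A condensed sketch under those assumptions (push_table is a hypothetical stand-in for mlx4_set_port_mac_table, and the real code also keeps per-entry reference counts):

#include <stdint.h>

#define MAC_VALID (1ULL << 63)

static int register_mac(uint64_t *table, int max, uint64_t mac,
			int (*push_table)(uint64_t *table))
{
	int i, free = -1, err;

	for (i = 0; i < max; i++) {
		if (!table[i] && free < 0)
			free = i;			/* first free slot */
		else if ((mac | MAC_VALID) == table[i])
			return i;			/* already present */
	}
	if (free < 0)
		return -1;				/* no free mac entries */

	table[free] = mac | MAC_VALID;
	err = push_table(table);
	if (err)
		table[free] = 0;			/* roll back on failure */
	return err ? err : free;
}
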
/linux-4.4.14/drivers/pci/
H A Dmsi.c484 /* Determine how many msi entries we have */ populate_msi_sysfs()
677 struct msix_entry *entries, int nvec) msix_setup_entries()
695 entry->msi_attrib.entry_nr = entries[i].entry; msix_setup_entries()
707 struct msix_entry *entries) msix_program_entries()
713 int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + for_each_pci_msi_entry()
716 entries[i].vector = entry->irq; for_each_pci_msi_entry()
726 * @entries: pointer to an array of struct msix_entry entries
727 * @nvec: number of @entries
731 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
734 struct msix_entry *entries, int nvec) msix_capability_init()
749 ret = msix_setup_entries(dev, base, entries, nvec); msix_capability_init()
757 /* Check if all MSI entries honor device restrictions */ msix_capability_init()
770 msix_program_entries(dev, entries); msix_capability_init()
910 * pci_msix_vec_count - return the number of device's MSI-X table entries
912 * This function returns the number of device's MSI-X table entries and
932 * @entries: pointer to an array of MSI-X entries
944 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) pci_enable_msix() argument
952 if (!entries) pci_enable_msix()
961 /* Check for any invalid entries */ pci_enable_msix()
963 if (entries[i].entry >= nr_entries) pci_enable_msix()
966 if (entries[i].entry == entries[j].entry) pci_enable_msix()
977 return msix_capability_init(dev, entries, nvec); pci_enable_msix()
1089 * @entries: pointer to an array of MSI-X entries
1101 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, pci_enable_msix_range() argument
1111 rc = pci_enable_msix(dev, entries, nvec); pci_enable_msix_range()
676 msix_setup_entries(struct pci_dev *dev, void __iomem *base, struct msix_entry *entries, int nvec) msix_setup_entries() argument
706 msix_program_entries(struct pci_dev *dev, struct msix_entry *entries) msix_program_entries() argument
733 msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec) msix_capability_init() argument
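The validation above (every .entry below the table size, no duplicates) is what a caller of this API must satisfy. A hedged driver-side sketch of typical usage, with hypothetical names (MY_NVEC, my_setup_msix); on success pci_enable_msix_range() fills in .vector with the assigned irqs:

#include <linux/pci.h>

#define MY_NVEC 4

static int my_setup_msix(struct pci_dev *pdev, struct msix_entry *msix)
{
	int i, nvec;

	for (i = 0; i < MY_NVEC; i++)
		msix[i].entry = i;	/* unique, < pci_msix_vec_count() */

	/* accept anywhere between 1 and MY_NVEC vectors */
	nvec = pci_enable_msix_range(pdev, msix, 1, MY_NVEC);
	if (nvec < 0)
		return nvec;		/* negative errno on failure */

	/* msix[0..nvec-1].vector are now ready for request_irq() */
	return nvec;
}
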
/linux-4.4.14/drivers/input/
H A Dsparse-keymap.c162 * @setup: Function that can be used to adjust keymap entries
265 * @value: Value that should be reported (ignored by %KE_SW entries)
267 * entries right after reporting press event, ignored by all other
268 * entries
302 * @value: Value that should be reported (ignored by %KE_SW entries)
304 * entries right after reporting press event, ignored by all other
305 * entries
/linux-4.4.14/net/8021q/
H A Dvlanproc.c38 /* Methods for preparing data for reading proc entries */
51 * Names of the proc directory entries
60 * entries:
108 * Proc filesystem directory entries.
123 * Clean up /proc/net/vlan entries
136 /* Dynamically added entries should be cleaned up as their vlan_device vlan_proc_cleanup()
142 * Create /proc/net/vlan entries
/linux-4.4.14/fs/nfs/
H A Dmount_clnt.c420 u32 entries, i; decode_auth_flavors() local
429 entries = be32_to_cpup(p); decode_auth_flavors()
430 dprintk("NFS: received %u auth flavors\n", entries); decode_auth_flavors()
431 if (entries > NFS_MAX_SECFLAVORS) decode_auth_flavors()
432 entries = NFS_MAX_SECFLAVORS; decode_auth_flavors()
434 p = xdr_inline_decode(xdr, 4 * entries); decode_auth_flavors()
438 if (entries > *count) decode_auth_flavors()
439 entries = *count; decode_auth_flavors()
441 for (i = 0; i < entries; i++) { decode_auth_flavors()
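decode_auth_flavors() above clamps the on-wire count twice, first to NFS_MAX_SECFLAVORS and then to the caller's buffer, before copying any entry, so a hostile server cannot oversize the copy. A user-space sketch of that defensive pattern (byte-order conversion elided; MAX_FLAVORS is a stand-in for NFS_MAX_SECFLAVORS):

#include <stdint.h>
#include <stddef.h>

#define MAX_FLAVORS 8

static size_t decode_flavors(const uint32_t *wire, size_t wire_words,
			     uint32_t *out, size_t out_count)
{
	size_t entries, i;

	if (wire_words < 1)
		return 0;
	entries = wire[0];			/* on-wire count */
	if (entries > MAX_FLAVORS)
		entries = MAX_FLAVORS;		/* protocol cap */
	if (entries > wire_words - 1)
		entries = wire_words - 1;	/* words actually present */
	if (entries > out_count)
		entries = out_count;		/* caller's buffer */

	for (i = 0; i < entries; i++)
		out[i] = wire[1 + i];
	return entries;
}
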
/linux-4.4.14/arch/mips/mm/
H A Dtlb-r4k.c84 /* Make sure all entries differ. */ local_flush_tlb_all()
100 /* All entries common to a mm share an asid. To effectively flush
101 these entries, we just bump the asid. */ local_flush_tlb_mm()
150 /* Make sure all entries differ. */ local_flush_tlb_range()
196 /* Make sure all entries differ. */ local_flush_tlb_kernel_range()
233 /* Make sure all entries differ. */ local_flush_tlb_page()
268 /* Make sure all entries differ. */ local_flush_tlb_one()
421 * Used for loading TLB entries before trap_init() has started, when we
525 printk("Restricting TLB to %d entries\n", ntlb); tlb_init()
/linux-4.4.14/sound/pci/trident/
H A Dtrident_memory.c38 do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
43 (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
71 /* fill TLB entries -- we need to fill two entries */ set_tlb_bus()
101 /* fill TLB entries -- UNIT_PAGES entries must be filled */ set_tlb_bus()
214 /* set TLB entries */ snd_trident_alloc_sg_pages()
261 /* set TLB entries */ snd_trident_alloc_cont_pages()
307 /* reset TLB entries */ snd_trident_free_pages()
/linux-4.4.14/drivers/scsi/qla2xxx/
H A Dqla_gs.c253 * @list: switch info entries to populate
303 gid_data = &ct_rsp->rsp.gid_pt.entries[i]; qla2x00_gid_pt()
333 * @list: switch info entries to populate
396 * @list: switch info entries to populate
805 * @list: switch info entries to populate
878 * @list: switch info entries to populate
934 * @list: switch info entries to populate
1269 void *entries; qla2x00_fdmi_rhba() local
1291 entries = ct_req->req.rhba.hba_identifier; qla2x00_fdmi_rhba()
1294 eiter = entries + size; qla2x00_fdmi_rhba()
1304 eiter = entries + size; qla2x00_fdmi_rhba()
1317 eiter = entries + size; qla2x00_fdmi_rhba()
1337 eiter = entries + size; qla2x00_fdmi_rhba()
1350 eiter = entries + size; qla2x00_fdmi_rhba()
1363 eiter = entries + size; qla2x00_fdmi_rhba()
1387 eiter = entries + size; qla2x00_fdmi_rhba()
1400 eiter = entries + size; qla2x00_fdmi_rhba()
1413 eiter = entries + size; qla2x00_fdmi_rhba()
1432 entries, size); qla2x00_fdmi_rhba()
1479 void *entries; qla2x00_fdmi_rpa() local
1500 entries = ct_req->req.rpa.port_name; qla2x00_fdmi_rpa()
1503 eiter = entries + size; qla2x00_fdmi_rpa()
1515 eiter = entries + size; qla2x00_fdmi_rpa()
1555 eiter = entries + size; qla2x00_fdmi_rpa()
1598 eiter = entries + size; qla2x00_fdmi_rpa()
1611 eiter = entries + size; qla2x00_fdmi_rpa()
1624 eiter = entries + size; qla2x00_fdmi_rpa()
1648 entries, size); qla2x00_fdmi_rpa()
1690 void *entries; qla2x00_fdmiv2_rhba() local
1714 entries = ct_req->req.rhba2.hba_identifier; qla2x00_fdmiv2_rhba()
1717 eiter = entries + size; qla2x00_fdmiv2_rhba()
1727 eiter = entries + size; qla2x00_fdmiv2_rhba()
1741 eiter = entries + size; qla2x00_fdmiv2_rhba()
1761 eiter = entries + size; qla2x00_fdmiv2_rhba()
1774 eiter = entries + size; qla2x00_fdmiv2_rhba()
1787 eiter = entries + size; qla2x00_fdmiv2_rhba()
1811 eiter = entries + size; qla2x00_fdmiv2_rhba()
1824 eiter = entries + size; qla2x00_fdmiv2_rhba()
1838 eiter = entries + size; qla2x00_fdmiv2_rhba()
1851 eiter = entries + size; qla2x00_fdmiv2_rhba()
1871 eiter = entries + size; qla2x00_fdmiv2_rhba()
1884 eiter = entries + size; qla2x00_fdmiv2_rhba()
1897 eiter = entries + size; qla2x00_fdmiv2_rhba()
1907 eiter = entries + size; qla2x00_fdmiv2_rhba()
1917 eiter = entries + size; qla2x00_fdmiv2_rhba()
1927 eiter = entries + size; qla2x00_fdmiv2_rhba()
1940 eiter = entries + size; qla2x00_fdmiv2_rhba()
1959 entries, size); qla2x00_fdmiv2_rhba()
2055 void *entries; qla2x00_fdmiv2_rpa() local
2075 entries = ct_req->req.rpa2.port_name; qla2x00_fdmiv2_rpa()
2078 eiter = entries + size; qla2x00_fdmiv2_rpa()
2090 eiter = entries + size; qla2x00_fdmiv2_rpa()
2130 eiter = entries + size; qla2x00_fdmiv2_rpa()
2165 eiter = entries + size; qla2x00_fdmiv2_rpa()
2178 eiter = entries + size; qla2x00_fdmiv2_rpa()
2191 eiter = entries + size; qla2x00_fdmiv2_rpa()
2210 eiter = entries + size; qla2x00_fdmiv2_rpa()
2220 eiter = entries + size; qla2x00_fdmiv2_rpa()
2230 eiter = entries + size; qla2x00_fdmiv2_rpa()
2243 eiter = entries + size; qla2x00_fdmiv2_rpa()
2253 eiter = entries + size; qla2x00_fdmiv2_rpa()
2263 eiter = entries + size; qla2x00_fdmiv2_rpa()
2273 eiter = entries + size; qla2x00_fdmiv2_rpa()
2287 eiter = entries + size; qla2x00_fdmiv2_rpa()
2297 eiter = entries + size; qla2x00_fdmiv2_rpa()
2307 eiter = entries + size; qla2x00_fdmiv2_rpa()
2322 entries, size); qla2x00_fdmiv2_rpa()
2415 * @list: switch info entries to populate
2523 * @list: switch info entries to populate
2630 * @list: switch info entries to populate
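The eiter = entries + size lines repeated through the FDMI helpers above are a running-offset append: each attribute is written at the current end of the buffer and size grows by that attribute's (padded) length. A generic sketch of the pattern with illustrative field names:

#include <string.h>
#include <stdint.h>

struct attr_hdr {
	uint16_t type;
	uint16_t len;	/* header + payload, padded to 4 bytes */
};

static size_t append_attr(void *entries, size_t size, uint16_t type,
			  const void *payload, uint16_t plen)
{
	struct attr_hdr *eiter = (void *)((char *)entries + size);
	uint16_t len = (uint16_t)((sizeof(*eiter) + plen + 3) & ~3u);

	eiter->type = type;	/* real FDMI fields are big-endian */
	eiter->len = len;
	memcpy(eiter + 1, payload, plen);
	return size + len;	/* new running size */
}
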
/linux-4.4.14/drivers/uwb/
H A Dest.c51 u8 entries; member in struct:uwb_est
236 * right position (entries are sorted by type, event_high, vendor and
257 const struct uwb_est_entry *entry, size_t entries) uwb_est_register()
283 uwb_est[itr].entries = entries; uwb_est_register()
306 const struct uwb_est_entry *entry, size_t entries) uwb_est_unregister()
315 .entries = entries uwb_est_unregister()
362 if (event_low >= est->entries) { /* in range? */ uwb_est_get_size()
365 est->entries, event_low); uwb_est_get_size()
373 est->entries, event_low); uwb_est_get_size()
394 est->product, est->entries); uwb_est_get_size()
256 uwb_est_register(u8 type, u8 event_high, u16 vendor, u16 product, const struct uwb_est_entry *entry, size_t entries) uwb_est_register() argument
305 uwb_est_unregister(u8 type, u8 event_high, u16 vendor, u16 product, const struct uwb_est_entry *entry, size_t entries) uwb_est_unregister() argument
/linux-4.4.14/drivers/scsi/csiostor/
H A Dcsio_isr.c504 struct msix_entry *entries; csio_enable_msix() local
515 entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL); csio_enable_msix()
516 if (!entries) csio_enable_msix()
520 entries[i].entry = (uint16_t)i; csio_enable_msix()
524 cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt); csio_enable_msix()
526 kfree(entries); csio_enable_msix()
538 entryp->vector = entries[i].vector; csio_enable_msix()
543 csio_set_nondata_intr_idx(hw, entries[k].entry); csio_enable_msix()
544 csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry); csio_enable_msix()
545 csio_set_fwevt_intr_idx(hw, entries[k++].entry); csio_enable_msix()
552 hw->sqset[i][j].intr_idx = entries[n].entry; csio_enable_msix()
558 kfree(entries); csio_enable_msix()
/linux-4.4.14/drivers/net/wireless/rt2x00/
H A Drt2x00queue.c559 * 2) Rule 1 can be broken when the available entries rt2x00queue_kick_tx_queue()
824 if (fn(&queue->entries[i], data)) rt2x00queue_for_each_entry()
829 if (fn(&queue->entries[i], data)) rt2x00queue_for_each_entry()
834 if (fn(&queue->entries[i], data)) rt2x00queue_for_each_entry()
857 entry = &queue->entries[queue->index[index]]; rt2x00queue_get_entry()
1094 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); queue_for_each()
1100 struct queue_entry *entries; rt2x00queue_alloc_entries() local
1107 * Allocate all queue entries. rt2x00queue_alloc_entries()
1109 entry_size = sizeof(*entries) + queue->priv_size; rt2x00queue_alloc_entries()
1110 entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); rt2x00queue_alloc_entries()
1111 if (!entries) rt2x00queue_alloc_entries()
1119 entries[i].flags = 0; rt2x00queue_alloc_entries()
1120 entries[i].queue = queue; rt2x00queue_alloc_entries()
1121 entries[i].skb = NULL; rt2x00queue_alloc_entries()
1122 entries[i].entry_idx = i; rt2x00queue_alloc_entries()
1123 entries[i].priv_data = rt2x00queue_alloc_entries()
1124 QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, rt2x00queue_alloc_entries()
1125 sizeof(*entries), queue->priv_size); rt2x00queue_alloc_entries()
1130 queue->entries = entries; rt2x00queue_alloc_entries()
1139 if (!queue->entries) rt2x00queue_free_skbs()
1143 rt2x00queue_free_skb(&queue->entries[i]); rt2x00queue_free_skbs()
1153 skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL); rt2x00queue_alloc_rxskbs()
1156 queue->entries[i].skb = skb; rt2x00queue_alloc_rxskbs()
1194 rt2x00_err(rt2x00dev, "Queue entries allocation failed\n");
1208 kfree(queue->entries); queue_for_each()
1209 queue->entries = NULL; queue_for_each()
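rt2x00queue_alloc_entries() above makes a single allocation of limit * (entry + priv_size) bytes and then points each entry's priv_data into the region after the entry array; QUEUE_ENTRY_PRIV_OFFSET computes base + limit * sizeof(entry) + i * priv_size. A user-space sketch of that layout with simplified types:

#include <stdlib.h>

struct entry {
	int idx;
	void *priv_data;
};

static struct entry *alloc_entries(unsigned int limit, size_t priv_size)
{
	struct entry *entries = calloc(limit, sizeof(*entries) + priv_size);
	char *priv_base;
	unsigned int i;

	if (!entries)
		return NULL;
	priv_base = (char *)entries + limit * sizeof(*entries);

	for (i = 0; i < limit; i++) {
		entries[i].idx = i;
		/* private blocks are packed after the entry array */
		entries[i].priv_data = priv_base + i * priv_size;
	}
	return entries;
}
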
/linux-4.4.14/arch/x86/mm/
H A Dextable.c104 * search that we use to find entries in it works properly.
129 /* Convert all entries to being relative to the start of the section */ sort_extable()
141 /* Denormalize all entries */ sort_extable()
/linux-4.4.14/samples/kfifo/
H A Ddma-example.c60 * We need two different SG entries: one for the free space area at the example_init()
66 printk(KERN_INFO "DMA sgl entries: %d\n", nents); example_init()
96 printk(KERN_INFO "DMA sgl entries: %d\n", nents); example_init()
/linux-4.4.14/arch/cris/mm/
H A Dtlb.c20 * to invalidate TLB entries.
23 * so we can make TLB entries that will never match.
47 /* throw out any TLB entries belonging to the mm we replace alloc_context()
/linux-4.4.14/fs/reiserfs/
H A Ditem_ops.c462 * Create an array of sizes of directory entries for virtual
505 /* compare total size of entries with item length */ direntry_create_vi()
530 * return number of entries which may fit into specified amount of
537 int entries = 0; direntry_check_left() local
546 entries++; direntry_check_left()
549 if (entries == dir_u->entry_count) { direntry_check_left()
557 && entries < 2) direntry_check_left()
558 entries = 0; direntry_check_left()
560 return entries ? : -1; direntry_check_left()
566 int entries = 0; direntry_check_right() local
575 entries++; direntry_check_right()
577 BUG_ON(entries == dir_u->entry_count); direntry_check_right()
581 && entries > dir_u->entry_count - 2) direntry_check_right()
582 entries = dir_u->entry_count - 2; direntry_check_right()
584 return entries ? : -1; direntry_check_right()
587 /* sum of entry sizes between from-th and to-th entries including both edges */ direntry_part_size()
622 printk("%d entries: ", dir_u->entry_count); direntry_print_vi()
/linux-4.4.14/drivers/iio/adc/
H A DMakefile5 # When adding new entries keep the list in alphabetical order
/linux-4.4.14/drivers/parisc/
H A Diommu-helpers.h7 * @nents: The number of entries in the scatter/gather list.
91 ** in the DMA stream. Allocates PDIR entries but does not fill them.
122 /* PARANOID: clear entries */ iommu_coalesce_chunks()
139 /* PARANOID: clear entries */ iommu_coalesce_chunks()
/linux-4.4.14/drivers/pinctrl/nomadik/
H A Dpinctrl-nomadik.h97 * @ngroups: The number of entries in @groups.
128 * @npins: The number of entries in @pins.
130 * @nfunctions: The number of entries in @functions.
132 * @ngroups: The number of entries in @groups.
/linux-4.4.14/arch/um/include/asm/
H A Dpgtable-2level.h20 * entries per page directory level: the i386 is two-level, so
/linux-4.4.14/arch/x86/kvm/
H A Dcpuid.h10 struct kvm_cpuid_entry2 __user *entries,
14 struct kvm_cpuid_entry __user *entries);
17 struct kvm_cpuid_entry2 __user *entries);
20 struct kvm_cpuid_entry2 __user *entries);
/linux-4.4.14/arch/x86/um/asm/
H A Dmm_context.h33 struct ldt_entry entries[LDT_DIRECT_ENTRIES]; member in union:uml_ldt::__anon3224
/linux-4.4.14/arch/arc/include/asm/
H A Dtlb.h19 * This pair is called at time of munmap/exit to flush cache and TLB entries
/linux-4.4.14/fs/ext4/
H A Dnamei.c212 struct dx_entry entries[0]; member in struct:dx_root
218 struct dx_entry entries[0]; member in struct:dx_node
225 struct dx_entry *entries; member in struct:dx_frame
248 static unsigned dx_get_count(struct dx_entry *entries);
249 static unsigned dx_get_limit(struct dx_entry *entries);
250 static void dx_set_count(struct dx_entry *entries, unsigned value);
251 static void dx_set_limit(struct dx_entry *entries, unsigned value);
535 static inline unsigned dx_get_count(struct dx_entry *entries) dx_get_count() argument
537 return le16_to_cpu(((struct dx_countlimit *) entries)->count); dx_get_count()
540 static inline unsigned dx_get_limit(struct dx_entry *entries) dx_get_limit() argument
542 return le16_to_cpu(((struct dx_countlimit *) entries)->limit); dx_get_limit()
545 static inline void dx_set_count(struct dx_entry *entries, unsigned value) dx_set_count() argument
547 ((struct dx_countlimit *) entries)->count = cpu_to_le16(value); dx_set_count()
550 static inline void dx_set_limit(struct dx_entry *entries, unsigned value) dx_set_limit() argument
552 ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value); dx_set_limit()
578 static void dx_show_index(char * label, struct dx_entry *entries) dx_show_index() argument
580 int i, n = dx_get_count (entries); dx_show_index()
583 printk("%x->%lu ", i ? dx_get_hash(entries + i) : dx_show_index()
584 0, (unsigned long)dx_get_block(entries + i)); dx_show_index()
686 struct dx_entry *entries, int levels) dx_show_entries()
689 unsigned count = dx_get_count(entries), names = 0, space = 0, i; dx_show_entries()
693 for (i = 0; i < count; i++, entries++) dx_show_entries()
695 ext4_lblk_t block = dx_get_block(entries); dx_show_entries()
696 ext4_lblk_t hash = i ? dx_get_hash(entries): 0; dx_show_entries()
697 u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash; dx_show_entries()
704 dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1): dx_show_entries()
734 struct dx_entry *at, *entries, *p, *q, *m; dx_probe() local
775 entries = (struct dx_entry *)(((char *)&root->info) + dx_probe()
778 if (dx_get_limit(entries) != dx_root_limit(dir, dx_probe()
781 dx_get_limit(entries), dx_probe()
788 count = dx_get_count(entries); dx_probe()
789 if (!count || count > dx_get_limit(entries)) { dx_probe()
792 count, dx_get_limit(entries)); dx_probe()
796 p = entries + 1; dx_probe()
797 q = entries + count - 1; dx_probe()
809 at = entries; dx_probe()
823 dxtrace(printk(" %x->%u\n", at == entries ? 0 : dx_get_hash(at), dx_probe()
825 frame->entries = entries; dx_probe()
836 entries = ((struct dx_node *) frame->bh->b_data)->entries; dx_probe()
838 if (dx_get_limit(entries) != dx_node_limit(dir)) { dx_probe()
841 dx_get_limit(entries), dx_node_limit(dir)); dx_probe()
897 * If we run out of entries in the interior node, loop around and ext4_htree_next_block()
903 if (++(p->at) < p->entries + dx_get_count(p->entries)) ext4_htree_next_block()
936 p->at = p->entries = ((struct dx_node *) bh->b_data)->entries; ext4_htree_next_block()
944 * directory block. It returns the number of directory entries loaded
1039 * This function returns the number of entries inserted into the tree,
1127 * Stop if: (a) there are no more entries, or ext4_htree_fill_tree()
1136 dxtrace(printk(KERN_DEBUG "Fill tree: returned %d entries, " ext4_htree_fill_tree()
1161 * Returns number of entries mapped.
1216 struct dx_entry *entries = frame->entries; dx_insert_block() local
1218 int count = dx_get_count(entries); dx_insert_block()
1220 assert(count < dx_get_limit(entries)); dx_insert_block()
1221 assert(old < entries + count); dx_insert_block()
1222 memmove(new + 1, new, (char *)(entries + count) - (char *)(new)); dx_insert_block()
1225 dx_set_count(entries, count + 1); dx_insert_block()
1629 * Move count entries from end of map between two memory locations.
1679 * Allocate a new block, and move entries so that they are approx. equally full.
1781 dxtrace(dx_show_index("frame", frame->entries)); do_split()
1938 struct dx_entry *entries; make_indexed_dir() local
2004 entries = root->entries; make_indexed_dir()
2005 dx_set_block(entries, 1); make_indexed_dir()
2006 dx_set_count(entries, 1); make_indexed_dir()
2007 dx_set_limit(entries, dx_root_limit(dir, sizeof(root->info))); make_indexed_dir()
2018 frame->entries = entries; make_indexed_dir()
2019 frame->at = entries; make_indexed_dir()
2160 struct dx_entry *entries, *at; ext4_dx_add_entry() local
2170 entries = frame->entries; ext4_dx_add_entry()
2189 dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n", ext4_dx_add_entry()
2190 dx_get_count(entries), dx_get_limit(entries))); ext4_dx_add_entry()
2192 if (dx_get_count(entries) == dx_get_limit(entries)) { ext4_dx_add_entry()
2194 unsigned icount = dx_get_count(entries); ext4_dx_add_entry()
2200 if (levels && (dx_get_count(frames->entries) == ext4_dx_add_entry()
2201 dx_get_limit(frames->entries))) { ext4_dx_add_entry()
2212 entries2 = node2->entries; ext4_dx_add_entry()
2222 unsigned hash2 = dx_get_hash(entries + icount1); ext4_dx_add_entry()
2232 memcpy((char *) entries2, (char *) (entries + icount1), ext4_dx_add_entry()
2234 dx_set_count(entries, icount1); ext4_dx_add_entry()
2239 if (at - entries >= icount1) { ext4_dx_add_entry()
2240 frame->at = at = at - entries - icount1 + entries2; ext4_dx_add_entry()
2241 frame->entries = entries = entries2; ext4_dx_add_entry()
2245 dxtrace(dx_show_index("node", frames[1].entries)); ext4_dx_add_entry()
2247 ((struct dx_node *) bh2->b_data)->entries)); ext4_dx_add_entry()
2255 memcpy((char *) entries2, (char *) entries, ext4_dx_add_entry()
2260 dx_set_count(entries, 1); ext4_dx_add_entry()
2261 dx_set_block(entries + 0, newblock); ext4_dx_add_entry()
2266 frame->at = at = at - entries + entries2; ext4_dx_add_entry()
2267 frame->entries = entries = entries2; ext4_dx_add_entry()
2809 * list entries can cause panics at unmount time. ext4_orphan_add()
685 dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir, struct dx_entry *entries, int levels) dx_show_entries() argument
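dx_probe() above (p = entries + 1; q = entries + count - 1; ...) binary-searches the index block for the last entry whose hash is <= the target; entries[0] holds count/limit instead of a hash, so the search runs over entries[1..count-1]. A simplified sketch of that search:

struct dx_entry { unsigned int hash, block; };

static const struct dx_entry *dx_find(const struct dx_entry *entries,
				      unsigned int count, unsigned int hash)
{
	const struct dx_entry *p = entries + 1;	/* entries[0] is count/limit */
	const struct dx_entry *q = entries + count - 1;

	while (p <= q) {
		const struct dx_entry *m = p + (q - p) / 2;

		if (m->hash > hash)
			q = m - 1;	/* target is in the left half */
		else
			p = m + 1;	/* keep looking right */
	}
	return p - 1;	/* last entry with hash <= target */
}
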
/linux-4.4.14/scripts/kconfig/
H A Dlist.h76 * Insert a new entry between two known consecutive entries.
79 * the prev/next entries already!
105 * Delete a list entry by making the prev/next entries
109 * the prev/next entries already!
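The list.h comments above describe the kernel's doubly-linked list primitive: inserting between two known consecutive entries is four pointer writes, which is why the warning says it is only for internal manipulation where prev/next are already known. A sketch mirroring the kernel's __list_add():

struct list_head {
	struct list_head *next, *prev;
};

static void __list_add(struct list_head *new,
		       struct list_head *prev,
		       struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
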
/linux-4.4.14/fs/adfs/
H A Ddir_f.h27 * Directory entries
/linux-4.4.14/include/asm-generic/
H A Dcacheflush.h8 * The cache doesn't need to be flushed when TLB entries change when
/linux-4.4.14/include/linux/power/
H A Dgpio-charger.h28 * @num_supplicants: Number of entries in the supplied_to array
/linux-4.4.14/arch/powerpc/kernel/
H A Dstacktrace.c37 trace->entries[trace->nr_entries++] = ip; save_context_stack()

Completed in 7819 milliseconds
