Searched refs:slots (Results 1 - 200 of 789) sorted by relevance

/linux-4.1.27/drivers/gpu/ipu-v3/
ipu-dmfc.c
106 unsigned slots; member in struct:dmfc_channel
175 static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots, ipu_dmfc_setup_channel() argument
182 "dmfc: using %d slots starting from segment %d for IPU channel %d\n", ipu_dmfc_setup_channel()
183 slots, segment, dmfc->data->ipu_channel); ipu_dmfc_setup_channel()
185 switch (slots) { ipu_dmfc_setup_channel()
226 dmfc->slots = slots; ipu_dmfc_setup_channel()
229 dmfc->slotmask = ((1 << slots) - 1) << segment; ipu_dmfc_setup_channel()
237 int slots = 1; dmfc_bandwidth_to_slots() local
239 while (slots * priv->bandwidth_per_slot < bandwidth) dmfc_bandwidth_to_slots()
240 slots *= 2; dmfc_bandwidth_to_slots()
242 return slots; dmfc_bandwidth_to_slots()
245 static int dmfc_find_slots(struct ipu_dmfc_priv *priv, int slots) dmfc_find_slots() argument
250 slotmask_need = (1 << slots) - 1; dmfc_find_slots()
271 dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n", ipu_dmfc_free_bandwidth()
272 dmfc->slots, dmfc->segment); ipu_dmfc_free_bandwidth()
276 if (!dmfc->slots) ipu_dmfc_free_bandwidth()
280 dmfc->slots = 0; ipu_dmfc_free_bandwidth()
287 if (priv->channels[i].slots > 0) { ipu_dmfc_free_bandwidth()
289 dmfc_find_slots(priv, priv->channels[i].slots); ipu_dmfc_free_bandwidth()
291 ((1 << priv->channels[i].slots) - 1) << ipu_dmfc_free_bandwidth()
297 if (priv->channels[i].slots > 0) ipu_dmfc_free_bandwidth()
299 priv->channels[i].slots, ipu_dmfc_free_bandwidth()
312 int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second); ipu_dmfc_alloc_bandwidth() local
323 if (slots > 8) { ipu_dmfc_alloc_bandwidth()
328 /* For the MEM_BG channel, first try to allocate twice the slots */ ipu_dmfc_alloc_bandwidth()
330 segment = dmfc_find_slots(priv, slots * 2); ipu_dmfc_alloc_bandwidth()
331 else if (slots < 2) ipu_dmfc_alloc_bandwidth()
332 /* Always allocate at least 128*4 bytes (2 slots) */ ipu_dmfc_alloc_bandwidth()
333 slots = 2; ipu_dmfc_alloc_bandwidth()
336 slots *= 2; ipu_dmfc_alloc_bandwidth()
338 segment = dmfc_find_slots(priv, slots); ipu_dmfc_alloc_bandwidth()
344 ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize); ipu_dmfc_alloc_bandwidth()
360 if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines) ipu_dmfc_init_channel()
420 * into 8 slots. ipu_dmfc_init()
424 dev_dbg(dev, "dmfc: 8 slots with %ldMpixel/s bandwidth each\n", ipu_dmfc_init()
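The dmfc_bandwidth_to_slots() hit above doubles the slot count until it covers the requested bandwidth, i.e. it rounds the request up to a power of two, and the neighbouring hits show each slot standing for a 64*4-byte FIFO segment tracked in a bit mask of the form ((1 << slots) - 1) << segment. A minimal standalone sketch of that rounding step, with bandwidth_per_slot as an illustrative parameter rather than the driver's private field:

/* Sketch only: round a pixel-bandwidth request up to a power-of-two
 * DMFC slot count, mirroring the doubling loop quoted above. */
static int bandwidth_to_slots(unsigned long bandwidth,
                              unsigned long bandwidth_per_slot)
{
        int slots = 1;

        while ((unsigned long)slots * bandwidth_per_slot < bandwidth)
                slots *= 2;             /* 1, 2, 4, 8, ... */

        return slots;                   /* the caller still clamps, e.g. to 8 */
}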
/linux-4.1.27/arch/ia64/scripts/
unwcheck.py
26 def check_func (func, slots, rlen_sum):
27 if slots != rlen_sum:
31 print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
37 slots = 0 variable
42 check_func(func, slots, rlen_sum)
47 slots = 3 * (end - start) / 16
54 check_func(func, slots, rlen_sum)
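The slots = 3 * (end - start) / 16 line above follows from the IA-64 instruction encoding: code is packed into 16-byte bundles of three instruction slots each, so a function spanning end - start bytes covers 3 * (end - start) / 16 slots, and check_func() flags an error when that figure disagrees with the summed unwind region lengths.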
/linux-4.1.27/drivers/input/
input-mt.c
27 * input_mt_init_slots() - initialize MT input slots
29 * @num_slots: number of slots used by the device
39 * reinitialize with a different number of slots.
52 mt = kzalloc(sizeof(*mt) + num_slots * sizeof(*mt->slots), GFP_KERNEL); input_mt_init_slots()
91 /* Mark slots as 'inactive' */ input_mt_init_slots()
93 input_mt_set_value(&mt->slots[i], ABS_MT_TRACKING_ID, -1); input_mt_init_slots()
95 /* Mark slots as 'unused' */ input_mt_init_slots()
107 * input_mt_destroy_slots() - frees the MT slots of the input device
108 * @dev: input device with allocated MT slots
111 * automatically free the MT slots when the device is destroyed.
125 * @dev: input device with allocated MT slots
145 slot = &mt->slots[mt->slot]; input_mt_report_slot_state()
164 * @dev: input device with allocated MT slots
185 * @dev: input device with allocated MT slots
208 struct input_mt_slot *ps = &mt->slots[i]; input_mt_report_pointer_emulation()
247 if (!input_mt_is_used(mt, &mt->slots[i])) { __input_mt_drop_unused()
255 * input_mt_drop_unused() - Inactivate slots not seen in this frame
256 * @dev: input device with allocated MT slots
258 * Lift all slots not seen since the last call to this function.
273 * @dev: input device with allocated MT slots
276 * Depending on the flags, marks unused slots as inactive and performs
353 for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { input_mt_set_matrix()
368 int *slots, int num_pos) input_mt_set_slots()
374 slots[j] = -1; input_mt_set_slots()
376 for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { input_mt_set_slots()
382 slots[j] = s - mt->slots; input_mt_set_slots()
390 for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { input_mt_set_slots()
395 if (slots[j] < 0) { input_mt_set_slots()
396 slots[j] = s - mt->slots; input_mt_set_slots()
405 * @dev: input device with allocated MT slots
406 * @slots: the slot assignment to be filled
413 * slots.
417 * some contacts are assigned to unused slots.
421 int input_mt_assign_slots(struct input_dev *dev, int *slots, input_mt_assign_slots() argument
438 input_mt_set_slots(mt, slots, num_pos); input_mt_assign_slots()
446 * @dev: input device with allocated MT slots
464 for (s = mt->slots; s != mt->slots + mt->num_slots; s++) input_mt_get_slot_by_key()
466 return s - mt->slots; input_mt_get_slot_by_key()
468 for (s = mt->slots; s != mt->slots + mt->num_slots; s++) input_mt_get_slot_by_key()
471 return s - mt->slots; input_mt_get_slot_by_key()
367 input_mt_set_slots(struct input_mt *mt, int *slots, int num_pos) input_mt_set_slots() argument
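The input-mt.c hits above are the slot bookkeeping behind the kernel's multi-touch protocol type B. As a rough illustration of how a touchscreen driver normally feeds those slots (the helper name, the slot count, and the probe/frame comments below are invented for the example; only the input_mt_* and input_report_abs() calls come from the API shown above):

#include <linux/input/mt.h>

#define EXAMPLE_MAX_CONTACTS 10                 /* illustrative slot count */

/* Report one contact into its MT slot; inactive contacts are marked with
 * ABS_MT_TRACKING_ID = -1 by input_mt_report_slot_state(). */
static void example_report_contact(struct input_dev *dev, int slot,
                                   bool active, int x, int y)
{
        input_mt_slot(dev, slot);
        input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
        if (active) {
                input_report_abs(dev, ABS_MT_POSITION_X, x);
                input_report_abs(dev, ABS_MT_POSITION_Y, y);
        }
}

/* At probe time:    input_mt_init_slots(dev, EXAMPLE_MAX_CONTACTS, INPUT_MT_DIRECT);
 * After each frame: input_mt_sync_frame(dev); input_sync(dev); */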
/linux-4.1.27/arch/blackfin/mach-common/
scb-init.c
14 inline void scb_mi_write(unsigned long scb_mi_arbw, unsigned int slots, scb_mi_write() argument
19 for (i = 0; i < slots; ++i) scb_mi_write()
24 inline void scb_mi_read(unsigned long scb_mi_arbw, unsigned int slots, scb_mi_read() argument
29 for (i = 0; i < slots; ++i) { scb_mi_read()
/linux-4.1.27/sound/pci/ac97/
ac97_pcm.c
310 * slots 7+8 snd_ac97_set_rate()
327 unsigned short slots = 0; get_pslots() local
341 slots |= (1<<AC97_SLOT_PCM_LEFT)|(1<<AC97_SLOT_PCM_RIGHT); get_pslots()
343 slots |= (1<<AC97_SLOT_PCM_SLEFT)|(1<<AC97_SLOT_PCM_SRIGHT); get_pslots()
345 slots |= (1<<AC97_SLOT_PCM_CENTER)|(1<<AC97_SLOT_LFE); get_pslots()
358 slots |= (1<<AC97_SLOT_PCM_SLEFT)|(1<<AC97_SLOT_PCM_SRIGHT); get_pslots()
360 slots |= (1<<AC97_SLOT_PCM_CENTER)|(1<<AC97_SLOT_LFE); get_pslots()
370 slots |= (1<<AC97_SLOT_PCM_CENTER)|(1<<AC97_SLOT_LFE); get_pslots()
376 return slots; get_pslots()
378 unsigned short slots; get_pslots() local
379 slots = (1<<AC97_SLOT_PCM_LEFT)|(1<<AC97_SLOT_PCM_RIGHT); get_pslots()
381 slots |= (1<<AC97_SLOT_PCM_SLEFT)|(1<<AC97_SLOT_PCM_SRIGHT); get_pslots()
383 slots |= (1<<AC97_SLOT_PCM_CENTER)|(1<<AC97_SLOT_LFE); get_pslots()
393 return slots; get_pslots()
399 unsigned short slots; get_cslots() local
403 slots = (1<<AC97_SLOT_PCM_LEFT)|(1<<AC97_SLOT_PCM_RIGHT); get_cslots()
404 slots |= (1<<AC97_SLOT_MIC); get_cslots()
405 return slots; get_cslots()
408 static unsigned int get_rates(struct ac97_pcm *pcm, unsigned int cidx, unsigned short slots, int dbl) get_rates() argument
415 if (!(slots & (1 << i))) get_rates()
435 * snd_ac97_pcm_assign - assign AC97 slots to given PCM streams
440 * It assigns available AC97 slots for given PCMs. If none or only
441 * some slots are available, pcm->xxx.slots and pcm->xxx.rslots[] members
455 unsigned short tmp, slots; snd_ac97_pcm_assign() local
494 slots = pcm->r[0].slots; snd_ac97_pcm_assign()
495 for (j = 0; j < 4 && slots; j++) { snd_ac97_pcm_assign()
505 tmp &= slots; snd_ac97_pcm_assign()
512 tmp &= pcm->r[0].slots; snd_ac97_pcm_assign()
525 slots &= ~tmp; snd_ac97_pcm_assign()
526 rpcm->r[0].slots |= tmp; snd_ac97_pcm_assign()
535 if ((tmp & pcm->r[1].slots) == tmp) { snd_ac97_pcm_assign()
536 rpcm->r[1].slots = tmp; snd_ac97_pcm_assign()
564 * @slots: a subset of allocated slots (snd_ac97_pcm_assign) for this pcm
566 * It locks the specified slots and sets the given rate to AC97 registers.
571 enum ac97_pcm_cfg cfg, unsigned short slots) snd_ac97_pcm_open()
591 if (!(slots & (1 << i))) snd_ac97_pcm_open()
617 if (!(slots & (1 << i))) snd_ac97_pcm_open()
642 pcm->aslots = slots; snd_ac97_pcm_open()
646 pcm->aslots = slots; snd_ac97_pcm_open()
657 * It frees the locked AC97 slots.
664 unsigned short slots = pcm->aslots; snd_ac97_pcm_close() local
670 if (!(slots & (1 << i))) snd_ac97_pcm_close()
685 if (!(slots & (1 << i))) snd_ac97_pcm_close()
570 snd_ac97_pcm_open(struct ac97_pcm *pcm, unsigned int rate, enum ac97_pcm_cfg cfg, unsigned short slots) snd_ac97_pcm_open() argument
/linux-4.1.27/drivers/misc/mei/
interrupt.c
173 int slots; mei_cl_irq_disconnect_rsp() local
176 slots = mei_hbuf_empty_slots(dev); mei_cl_irq_disconnect_rsp()
179 if (slots < msg_slots) mei_cl_irq_disconnect_rsp()
208 int slots; mei_cl_irq_disconnect() local
211 slots = mei_hbuf_empty_slots(dev); mei_cl_irq_disconnect()
213 if (slots < msg_slots) mei_cl_irq_disconnect()
248 int slots; mei_cl_irq_read() local
252 slots = mei_hbuf_empty_slots(dev); mei_cl_irq_read()
254 if (slots < msg_slots) mei_cl_irq_read()
285 int slots; mei_cl_irq_connect() local
289 slots = mei_hbuf_empty_slots(dev); mei_cl_irq_connect()
294 if (slots < msg_slots) mei_cl_irq_connect()
319 * @slots: slots to read.
324 struct mei_cl_cb *cmpl_list, s32 *slots) mei_irq_read_handler()
332 (*slots)--; mei_irq_read_handler()
333 dev_dbg(dev->dev, "slots =%08x.\n", *slots); mei_irq_read_handler()
345 if (mei_slots2data(*slots) < mei_hdr->length) { mei_irq_read_handler()
347 *slots); mei_irq_read_handler()
388 /* reset the number of slots and header */ mei_irq_read_handler()
389 *slots = mei_count_full_read_slots(dev); mei_irq_read_handler()
392 if (*slots == -EOVERFLOW) { mei_irq_read_handler()
394 dev_err(dev->dev, "resetting due to slots overflow.\n"); mei_irq_read_handler()
420 s32 slots; mei_irq_write_handler() local
427 slots = mei_hbuf_empty_slots(dev); mei_irq_write_handler()
428 if (slots <= 0) mei_irq_write_handler()
323 mei_irq_read_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list, s32 *slots) mei_irq_read_handler() argument
hw-txe.h
43 * @slots: number of empty slots
51 u32 slots; member in struct:mei_txe_hw
hw-me.c
391 * mei_hbuf_filled_slots - gets number of device filled buffer slots
395 * Return: number of filled slots
423 * mei_me_hbuf_empty_slots - counts write empty slots.
427 * Return: -EOVERFLOW if overflow, otherwise empty slots count
480 dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots); mei_me_write_message()
508 * mei_me_count_full_read_slots - counts read full slots.
512 * Return: -EOVERFLOW if overflow, otherwise filled slots count
791 s32 slots; mei_me_irq_thread_handler() local
824 /* check slots available for reading */ mei_me_irq_thread_handler()
825 slots = mei_count_full_read_slots(dev); mei_me_irq_thread_handler()
826 while (slots > 0) { mei_me_irq_thread_handler()
827 dev_dbg(dev->dev, "slots to read = %08x\n", slots); mei_me_irq_thread_handler()
828 rets = mei_irq_read_handler(dev, &complete_list, &slots); mei_me_irq_thread_handler()
mei_dev.h
281 * @hbuf_free_slots : query for write buffer empty slots
287 * @rdbuf_full_slots : query how many slots are filled
463 * @hbuf_depth : depth of hardware host/write buffer is slots
609 * mei_data2slots - get slots - number of (dwords) from a message length
614 * Return: number of slots
622 * mei_slots2data - get data in slots - bytes from slots
624 * @slots: number of available slots
626 * Return: number of bytes in slots
628 static inline u32 mei_slots2data(int slots) mei_slots2data() argument
630 return slots * 4; mei_slots2data()
651 struct mei_cl_cb *cmpl_list, s32 *slots);
683 int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
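As the mei_slots2data() hit shows, a MEI host-buffer slot is one 32-bit dword, so slots and bytes convert by a factor of four. A hedged sketch of both directions; the real helpers may also fold in the message-header length, which is omitted here:

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: MEI slot <-> byte conversion, one slot == 4 bytes (one dword). */
static inline u32 example_slots2bytes(int slots)
{
        return slots * 4;
}

static inline u32 example_bytes2slots(u32 bytes)
{
        return DIV_ROUND_UP(bytes, 4);  /* a partial dword still occupies a slot */
}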
/linux-4.1.27/arch/cris/arch-v32/mach-fs/
arbiter.c
5 * The algorithm first assigns slots to the clients that has specified
6 * bandwidth (e.g. ethernet) and then the remaining slots are divided
58 * (memory arbiter slots, that is)
61 * Program the memory arbiter slots for "region" according to what's
65 * number of slots, free to hand out to any client.
75 * This vector corresponds to the hardware arbiter slots (see crisv32_arbiter_config()
88 /* Allocate the requested non-zero number of slots, but crisv32_arbiter_config()
92 * first to get to any spare slots, else those slots crisv32_arbiter_config()
109 * free slots. crisv32_arbiter_config()
137 * Allocate remaining slots in round-robin crisv32_arbiter_config()
178 * "fixed scheme" for unclaimed slots. Though, if for some crisv32_arbiter_init()
220 * We make sure that there are enough slots only for non-zero crisv32_arbiter_allocate_bandwidth()
221 * requests. Requesting 0 bandwidth *may* allocate slots, crisv32_arbiter_allocate_bandwidth()
244 * slots will just be unused. However, handing out those unused slots
246 * would give unclaimed slots to an eager low-index client.
/linux-4.1.27/arch/sh/drivers/pci/
fixups-cayman.c
13 5V slots get into the CPU via a different path from the IRQ lines pcibios_map_platform_irq()
14 from the 3 3.3V slots. Thus, we have to detect whether the card's pcibios_map_platform_irq()
18 The added complication is that we don't know that the 5V slots are pcibios_map_platform_irq()
63 /* 5V slots */ pcibios_map_platform_irq()
fixups-sdk7786.c
18 * The SDK7786 FPGA supports mangling of most of the slots in some way or
/linux-4.1.27/fs/btrfs/
inode-item.c
37 item_size = btrfs_item_size_nr(leaf, path->slots[0]); find_name_in_backref()
38 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); find_name_in_backref()
67 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_find_name_in_ext_backref()
68 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_find_name_in_ext_backref()
166 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_del_inode_extref()
180 item_start = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_del_inode_extref()
234 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_del_inode_ref()
245 item_start = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_del_inode_ref()
307 item = btrfs_item_nr(path->slots[0]); btrfs_insert_inode_extref()
308 ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char); btrfs_insert_inode_extref()
356 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); btrfs_insert_inode_ref()
358 ref = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_insert_inode_ref()
374 ref = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_insert_inode_ref()
429 location->offset == (u64)-1 && path->slots[0] != 0) { btrfs_lookup_inode()
430 slot = path->slots[0] - 1; btrfs_lookup_inode()
435 path->slots[0]--; btrfs_lookup_inode()
file-item.c
67 item = btrfs_item_ptr(leaf, path->slots[0], btrfs_insert_file_extent()
110 if (path->slots[0] == 0) btrfs_lookup_csum()
112 path->slots[0]--; btrfs_lookup_csum()
113 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_lookup_csum()
119 csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_lookup_csum()
129 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); btrfs_lookup_csum()
258 path->slots[0]); __btrfs_lookup_bio_sums()
262 path->slots[0]); __btrfs_lookup_bio_sums()
266 item = btrfs_item_ptr(path->nodes[0], path->slots[0], __btrfs_lookup_bio_sums()
342 if (ret > 0 && path->slots[0] > 0) { btrfs_lookup_csums_range()
344 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); btrfs_lookup_csums_range()
350 btrfs_item_size_nr(leaf, path->slots[0] - 1)) btrfs_lookup_csums_range()
351 path->slots[0]--; btrfs_lookup_csums_range()
357 if (path->slots[0] >= btrfs_header_nritems(leaf)) { btrfs_lookup_csums_range()
366 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_lookup_csums_range()
375 size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_lookup_csums_range()
378 path->slots[0]++; btrfs_lookup_csums_range()
383 item = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_lookup_csums_range()
411 path->slots[0]++; btrfs_lookup_csums_range()
528 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; truncate_one_csum()
592 if (path->slots[0] == 0) btrfs_del_csums()
594 path->slots[0]--; btrfs_del_csums()
600 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_del_csums()
610 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; btrfs_del_csums()
653 path->slots[0]); btrfs_del_csums()
719 item_end = btrfs_item_ptr(leaf, path->slots[0], btrfs_csum_file_blocks()
722 btrfs_item_size_nr(leaf, path->slots[0])); btrfs_csum_file_blocks()
733 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_csum_file_blocks()
740 int slot = path->slots[0] + 1; btrfs_csum_file_blocks()
743 if (!nritems || (path->slots[0] >= nritems - 1)) { btrfs_csum_file_blocks()
749 slot = path->slots[0]; btrfs_csum_file_blocks()
773 if (path->slots[0] == 0) btrfs_csum_file_blocks()
775 path->slots[0]--; btrfs_csum_file_blocks()
779 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_csum_file_blocks()
789 if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) / btrfs_csum_file_blocks()
810 diff = diff - btrfs_item_size_nr(leaf, path->slots[0]); btrfs_csum_file_blocks()
847 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); btrfs_csum_file_blocks()
849 btrfs_item_size_nr(leaf, path->slots[0])); btrfs_csum_file_blocks()
887 const int slot = path->slots[0]; btrfs_extent_item_to_extent_map()
ctree.c
126 p->slots[i] = 0; btrfs_release_path()
1891 int orig_slot = path->slots[level]; balance_level()
1907 pslot = path->slots[level + 1]; balance_level()
2066 path->slots[level + 1] -= 1; balance_level()
2067 path->slots[level] = orig_slot; balance_level()
2074 path->slots[level] = orig_slot; balance_level()
2079 btrfs_node_blockptr(path->nodes[level], path->slots[level])) balance_level()
2109 int orig_slot = path->slots[level]; push_nodes_for_insert()
2119 pslot = path->slots[level + 1]; push_nodes_for_insert()
2159 path->slots[level + 1] -= 1; push_nodes_for_insert()
2160 path->slots[level] = orig_slot; push_nodes_for_insert()
2166 path->slots[level] = orig_slot; push_nodes_for_insert()
2213 path->slots[level + 1] += 1; push_nodes_for_insert()
2214 path->slots[level] = orig_slot - push_nodes_for_insert()
2316 slot = path->slots[level + 1]; reada_for_balance()
2374 if (!no_skips && path->slots[i] == 0) { unlock_up()
2382 if (nritems < 1 || path->slots[i] >= nritems - 1) { unlock_up()
2636 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { btrfs_find_item()
2643 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]); btrfs_find_item()
2789 p->slots[level + 1], &b); btrfs_search_slot()
2827 p->slots[level] = slot; btrfs_search_slot()
2837 slot = p->slots[level]; btrfs_search_slot()
2857 p->slots[level]++; btrfs_search_slot()
2894 p->slots[level] = slot; btrfs_search_slot()
2995 p->slots[level] = slot; btrfs_search_old_slot()
3000 p->slots[level]++; btrfs_search_old_slot()
3029 p->slots[level] = slot; btrfs_search_old_slot()
3077 if (p->slots[0] >= btrfs_header_nritems(leaf)) { btrfs_search_slot_for_read()
3093 if (p->slots[0] == 0) { btrfs_search_slot_for_read()
3099 if (p->slots[0] == btrfs_header_nritems(leaf)) btrfs_search_slot_for_read()
3100 p->slots[0]--; btrfs_search_slot_for_read()
3114 --p->slots[0]; btrfs_search_slot_for_read()
3136 int tslot = path->slots[i]; fixup_low_keys()
3163 slot = path->slots[0]; btrfs_set_item_key_safe()
3385 path->slots[level] = 0; insert_new_root()
3520 path->slots[level + 1] + 1, level + 1); split_node()
3522 if (path->slots[level] >= mid) { split_node()
3523 path->slots[level] -= mid; split_node()
3527 path->slots[level + 1] += 1; split_node()
3615 if (path->slots[0] >= left_nritems) __push_leaf_right()
3618 slot = path->slots[1]; __push_leaf_right()
3624 if (path->slots[0] > i) __push_leaf_right()
3626 if (path->slots[0] == i) { __push_leaf_right()
3633 if (path->slots[0] == i) __push_leaf_right()
3705 if (path->slots[0] >= left_nritems) { __push_leaf_right()
3706 path->slots[0] -= left_nritems; __push_leaf_right()
3712 path->slots[1] += 1; __push_leaf_right()
3751 slot = path->slots[1]; push_leaf_right()
3783 if (path->slots[0] == left_nritems && !empty) { push_leaf_right()
3791 path->slots[0] = 0; push_leaf_right()
3792 path->slots[1]++; push_leaf_right()
3843 if (path->slots[0] < i) __push_leaf_left()
3845 if (path->slots[0] == i) { __push_leaf_left()
3852 if (path->slots[0] == i) __push_leaf_left()
3938 if (path->slots[0] < push_items) { __push_leaf_left()
3939 path->slots[0] += old_left_nritems; __push_leaf_left()
3943 path->slots[1] -= 1; __push_leaf_left()
3947 path->slots[0] -= push_items; __push_leaf_left()
3949 BUG_ON(path->slots[0] < 0); __push_leaf_left()
3976 slot = path->slots[1]; push_leaf_left()
4073 path->slots[1] + 1, 1); copy_for_split()
4077 BUG_ON(path->slots[0] != slot); copy_for_split()
4083 path->slots[0] -= mid; copy_for_split()
4084 path->slots[1] += 1; copy_for_split()
4090 BUG_ON(path->slots[0] < 0); copy_for_split()
4096 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4114 slot = path->slots[0]; push_for_double_split()
4134 if (path->slots[0] == 0 || path->slots[0] == nritems) push_for_double_split()
4141 slot = path->slots[0]; push_for_double_split()
4180 slot = path->slots[0]; split_leaf()
4217 slot = path->slots[0]; split_leaf()
4287 path->slots[1] + 1, 1); split_leaf()
4291 path->slots[0] = 0; split_leaf()
4292 path->slots[1] += 1; split_leaf()
4296 path->slots[1], 1); split_leaf()
4300 path->slots[0] = 0; split_leaf()
4301 if (path->slots[1] == 0) split_leaf()
4338 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); setup_leaf_for_split()
4346 item_size = btrfs_item_size_nr(leaf, path->slots[0]); setup_leaf_for_split()
4348 fi = btrfs_item_ptr(leaf, path->slots[0], setup_leaf_for_split()
4366 if (item_size != btrfs_item_size_nr(leaf, path->slots[0])) setup_leaf_for_split()
4374 fi = btrfs_item_ptr(leaf, path->slots[0], setup_leaf_for_split()
4414 item = btrfs_item_nr(path->slots[0]); split_item()
4423 path->slots[0]), item_size); split_item()
4425 slot = path->slots[0] + 1; split_item()
4450 btrfs_item_ptr_offset(leaf, path->slots[0]), split_item()
4513 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_duplicate_item()
4519 path->slots[0]++; btrfs_duplicate_item()
4525 btrfs_item_ptr_offset(leaf, path->slots[0]), btrfs_duplicate_item()
4526 btrfs_item_ptr_offset(leaf, path->slots[0] - 1), btrfs_duplicate_item()
4554 slot = path->slots[0]; btrfs_truncate_item()
4660 slot = path->slots[0]; btrfs_extend_item()
4719 if (path->slots[0] == 0) { setup_items_for_insert()
4728 slot = path->slots[0]; setup_items_for_insert()
4819 slot = path->slots[0]; btrfs_insert_empty_items()
4846 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_insert_item()
4899 * a helper function to delete the leaf pointed to by path->slots[1] and
4914 del_ptr(root, path, 1, path->slots[1]); btrfs_del_leaf()
5004 slot = path->slots[1]; btrfs_del_items()
5022 path->slots[1] = slot; btrfs_del_items()
5150 path->slots[level] = slot; btrfs_search_forward()
5176 path->slots[level] = slot; btrfs_search_forward()
5189 path->slots[level] = slot; btrfs_search_forward()
5221 path->slots[*level]); tree_move_down()
5222 path->slots[*level - 1] = 0; tree_move_down()
5234 path->slots[*level]++; tree_move_next_or_upnext()
5236 while (path->slots[*level] >= nritems) { tree_move_next_or_upnext()
5241 path->slots[*level] = 0; tree_move_next_or_upnext()
5245 path->slots[*level]++; tree_move_next_or_upnext()
5274 path->slots[*level]); tree_advance()
5277 path->slots[*level]); tree_advance()
5291 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]); tree_compare_item()
5292 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]); tree_compare_item()
5296 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]); tree_compare_item()
5298 right_path->slots[0]); tree_compare_item()
5420 &left_key, left_path->slots[left_level]); btrfs_compare_trees()
5423 &left_key, left_path->slots[left_level]); btrfs_compare_trees()
5426 &right_key, right_path->slots[right_level]); btrfs_compare_trees()
5429 &right_key, right_path->slots[right_level]); btrfs_compare_trees()
5530 left_path->slots[left_level]); btrfs_compare_trees()
5533 right_path->slots[right_level]); btrfs_compare_trees()
5536 left_path->slots[left_level]); btrfs_compare_trees()
5539 right_path->slots[right_level]); btrfs_compare_trees()
5589 slot = path->slots[level] + 1; btrfs_find_next_key()
5621 slot = path->slots[level]; btrfs_find_next_key()
5696 if (nritems > 0 && path->slots[0] < nritems - 1) { btrfs_next_old_leaf()
5698 path->slots[0]++; btrfs_next_old_leaf()
5713 * where it should be inserted, so the path->slots[0] item must be the btrfs_next_old_leaf()
5716 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) { btrfs_next_old_leaf()
5727 slot = path->slots[level] + 1; btrfs_next_old_leaf()
5780 path->slots[level] = slot; btrfs_next_old_leaf()
5789 path->slots[level] = 0; btrfs_next_old_leaf()
5842 if (path->slots[0] == 0) { btrfs_previous_item()
5848 path->slots[0]--; btrfs_previous_item()
5854 if (path->slots[0] == nritems) btrfs_previous_item()
5855 path->slots[0]--; btrfs_previous_item()
5857 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_previous_item()
5884 if (path->slots[0] == 0) { btrfs_previous_extent_item()
5890 path->slots[0]--; btrfs_previous_extent_item()
5896 if (path->slots[0] == nritems) btrfs_previous_extent_item()
5897 path->slots[0]--; btrfs_previous_extent_item()
5899 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_previous_extent_item()
export.c
184 if (path->slots[0] == 0) { btrfs_get_parent()
189 path->slots[0]--; btrfs_get_parent()
192 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_get_parent()
199 ref = btrfs_item_ptr(leaf, path->slots[0], btrfs_get_parent()
266 path->slots[0]--; btrfs_get_name()
275 rref = btrfs_item_ptr(leaf, path->slots[0], btrfs_get_name()
280 iref = btrfs_item_ptr(leaf, path->slots[0], btrfs_get_name()
dir-item.c
57 item = btrfs_item_nr(path->slots[0]); insert_with_overflow()
58 ptr = btrfs_item_ptr(leaf, path->slots[0], char); insert_with_overflow()
262 slot = path->slots[0]; btrfs_check_dir_item_collision()
330 if (path->slots[0] >= nritems) { btrfs_search_dir_index_item()
341 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_search_dir_index_item()
349 path->slots[0]++; btrfs_search_dir_index_item()
394 dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); btrfs_match_dir_item_name()
398 total_len = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_match_dir_item_name()
434 item_len = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_delete_one_dir_name()
442 start = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_delete_one_dir_name()
tree-log.c
371 path->slots[0]); overwrite_item()
390 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); overwrite_item()
417 item = btrfs_item_ptr(path->nodes[0], path->slots[0], overwrite_item()
465 path->slots[0]); overwrite_item()
475 path->slots[0]); overwrite_item()
645 existing = btrfs_item_ptr(leaf, path->slots[0], replay_one_extent()
680 path->slots[0]); replay_one_extent()
889 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); backref_in_log()
899 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); backref_in_log()
961 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); __add_inode_ref()
962 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]); __add_inode_ref()
1019 item_size = btrfs_item_size_nr(leaf, path->slots[0]); __add_inode_ref()
1020 base = btrfs_item_ptr_offset(leaf, path->slots[0]); __add_inode_ref()
1311 item_size = btrfs_item_size_nr(leaf, path->slots[0]); count_inode_extrefs()
1312 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); count_inode_extrefs()
1354 if (path->slots[0] == 0) count_inode_refs()
1356 path->slots[0]--; count_inode_refs()
1360 path->slots[0]); count_inode_refs()
1364 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); count_inode_refs()
1366 path->slots[0]); count_inode_refs()
1379 if (path->slots[0] > 0) { count_inode_refs()
1380 path->slots[0]--; count_inode_refs()
1466 if (path->slots[0] == 0) fixup_inode_link_counts()
1468 path->slots[0]--; fixup_inode_link_counts()
1471 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); fixup_inode_link_counts()
1799 if (path->slots[0] == 0) find_dir_range()
1801 path->slots[0]--; find_dir_range()
1804 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); find_dir_range()
1810 item = btrfs_item_ptr(path->nodes[0], path->slots[0], find_dir_range()
1824 if (path->slots[0] >= nritems) { find_dir_range()
1829 path->slots[0]++; find_dir_range()
1832 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); find_dir_range()
1838 item = btrfs_item_ptr(path->nodes[0], path->slots[0], find_dir_range()
1877 slot = path->slots[0]; check_item_in_log()
1987 for (i = path->slots[0]; i < nritems; i++) { replay_xattr_deletes()
2122 if (path->slots[0] >= nritems) { replay_dir_deletes()
2128 path->slots[0]); replay_dir_deletes()
2310 if (path->slots[*level] >= walk_down_log_tree()
2314 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); walk_down_log_tree()
2315 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); walk_down_log_tree()
2332 path->slots[*level]++; walk_down_log_tree()
2372 path->slots[*level] = 0; walk_down_log_tree()
2378 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); walk_down_log_tree()
2395 slot = path->slots[i]; walk_up_log_tree()
2397 path->slots[i]++; walk_up_log_tree()
2465 path->slots[level] = 0; walk_log_tree()
3033 item = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_del_dir_entries_in_log()
3118 item = btrfs_item_ptr(path->nodes[0], path->slots[0], insert_dir_log_key()
3181 path->slots[0]); log_dir_items()
3192 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); log_dir_items()
3196 path->nodes[0], path->slots[0], log_dir_items()
3219 for (i = path->slots[0]; i < nritems; i++) { log_dir_items()
3264 path->slots[0] = nritems; log_dir_items()
3275 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); log_dir_items()
3282 path->nodes[0], path->slots[0], log_dir_items()
3379 if (path->slots[0] == 0) drop_objectid_items()
3382 path->slots[0]--; drop_objectid_items()
3384 path->slots[0]); drop_objectid_items()
3395 path->slots[0] - start_slot + 1); drop_objectid_items()
3477 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], log_inode_item()
3532 for (i = 0; i < nr; i++, dst_path->slots[0]++) { copy_items()
3534 dst_path->slots[0]); copy_items()
3543 dst_path->slots[0], copy_items()
3659 if (src_path->slots[0]) copy_items()
3660 src_path->slots[0]--; copy_items()
3662 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]); copy_items()
3666 extent = btrfs_item_ptr(src, src_path->slots[0], copy_items()
3671 src_path->slots[0], copy_items()
3702 i = src_path->slots[0]; copy_items()
3976 fi = btrfs_item_ptr(leaf, path->slots[0], log_one_extent()
4117 item = btrfs_item_ptr(path->nodes[0], path->slots[0], logged_inode_size()
4156 int slot = path->slots[0]; btrfs_log_all_xattrs()
4188 path->slots[0]++; btrfs_log_all_xattrs()
4256 ASSERT(path->slots[0] > 0); btrfs_log_trailing_hole()
4257 path->slots[0]--; btrfs_log_trailing_hole()
4259 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_log_trailing_hole()
4276 extent = btrfs_item_ptr(leaf, path->slots[0], btrfs_log_trailing_hole()
4282 path->slots[0], btrfs_log_trailing_hole()
4604 path->slots[0], btrfs_log_inode()
4636 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { btrfs_log_inode()
4640 ins_start_slot = path->slots[0]; btrfs_log_inode()
4658 ins_start_slot = path->slots[0]; btrfs_log_inode()
4662 path->slots[0]++; btrfs_log_inode()
4663 if (path->slots[0] < nritems) { btrfs_log_inode()
4665 path->slots[0]); btrfs_log_inode()
4972 for (i = path->slots[0]; i < nritems; i++) { log_new_dir_dentries()
5263 if (path->slots[0] == 0) btrfs_recover_log_trees()
5265 path->slots[0]--; btrfs_recover_log_trees()
5268 path->slots[0]); btrfs_recover_log_trees()
root-tree.c
99 if (path->slots[0] == 0) btrfs_find_root()
101 path->slots[0]--; btrfs_find_root()
106 slot = path->slots[0]; btrfs_find_root()
164 slot = path->slots[0]; btrfs_update_root()
195 slot = path->slots[0]; btrfs_update_root()
255 if (path->slots[0] >= btrfs_header_nritems(leaf)) { btrfs_find_orphan_roots()
264 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_find_orphan_roots()
374 ref = btrfs_item_ptr(leaf, path->slots[0], btrfs_del_root_ref()
448 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); btrfs_add_root_ref()
uuid-tree.c
66 slot = path->slots[0]; btrfs_uuid_tree_lookup()
127 slot = path->slots[0]; btrfs_uuid_tree_add()
136 slot = path->slots[0]; btrfs_uuid_tree_add()
197 slot = path->slots[0]; btrfs_uuid_tree_rem()
292 slot = path->slots[0]; btrfs_uuid_tree_iterate()
file.c
733 if (ret > 0 && path->slots[0] > 0 && search_start == start) { __btrfs_drop_extents()
735 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); __btrfs_drop_extents()
738 path->slots[0]--; __btrfs_drop_extents()
744 if (path->slots[0] >= btrfs_header_nritems(leaf)) { __btrfs_drop_extents()
758 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); __btrfs_drop_extents()
765 path->slots[0]++; __btrfs_drop_extents()
771 fi = btrfs_item_ptr(leaf, path->slots[0], __btrfs_drop_extents()
785 path->slots[0], fi); __btrfs_drop_extents()
804 path->slots[0]++; __btrfs_drop_extents()
839 fi = btrfs_item_ptr(leaf, path->slots[0] - 1, __btrfs_drop_extents()
844 fi = btrfs_item_ptr(leaf, path->slots[0], __btrfs_drop_extents()
907 path->slots[0]++; __btrfs_drop_extents()
918 del_slot = path->slots[0]; __btrfs_drop_extents()
921 BUG_ON(del_slot + del_nr != path->slots[0]); __btrfs_drop_extents()
945 if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) { __btrfs_drop_extents()
946 path->slots[0]++; __btrfs_drop_extents()
969 * Set path->slots[0] to first slot, so that after the delete __btrfs_drop_extents()
972 * path->slots[0] for our insertion (if replace_extent != 0). __btrfs_drop_extents()
974 path->slots[0] = del_slot; __btrfs_drop_extents()
995 if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) { __btrfs_drop_extents()
998 btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]); __btrfs_drop_extents()
1000 path->slots[0]++; __btrfs_drop_extents()
1108 if (ret > 0 && path->slots[0] > 0) btrfs_mark_extent_written()
1109 path->slots[0]--; btrfs_mark_extent_written()
1112 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_mark_extent_written()
1114 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
1129 if (extent_mergeable(leaf, path->slots[0] - 1, btrfs_mark_extent_written()
1134 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
1142 fi = btrfs_item_ptr(leaf, path->slots[0] - 1, btrfs_mark_extent_written()
1156 if (extent_mergeable(leaf, path->slots[0] + 1, btrfs_mark_extent_written()
1159 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
1165 path->slots[0]++; btrfs_mark_extent_written()
1169 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
1198 fi = btrfs_item_ptr(leaf, path->slots[0] - 1, btrfs_mark_extent_written()
1204 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
1222 path->slots[0]--; btrfs_mark_extent_written()
1230 if (extent_mergeable(leaf, path->slots[0] + 1, btrfs_mark_extent_written()
1238 del_slot = path->slots[0] + 1; btrfs_mark_extent_written()
1247 if (extent_mergeable(leaf, path->slots[0] - 1, btrfs_mark_extent_written()
1255 del_slot = path->slots[0]; btrfs_mark_extent_written()
1263 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_mark_extent_written()
2152 if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) { fill_holes()
2155 path->slots[0]--; fill_holes()
2156 fi = btrfs_item_ptr(leaf, path->slots[0], fill_holes()
2167 if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) { fill_holes()
2172 fi = btrfs_item_ptr(leaf, path->slots[0], fill_holes()
relocation.c
740 ASSERT(path1->slots[0]);
742 path1->slots[0]--;
770 if (path1->slots[0] >= btrfs_header_nritems(eb)) {
781 btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
789 ret = find_inline_backref(eb, path1->slots[0],
820 ref0 = btrfs_item_ptr(eb, path1->slots[0],
920 if (ret > 0 && path2->slots[level] > 0)
921 path2->slots[level]--;
924 WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
1014 path1->slots[0]++;
1601 fi = btrfs_item_ptr(leaf, path->slots[0], get_new_location()
1747 btrfs_node_key(path->nodes[level], &key2, path->slots[level]); memcmp_node_keys()
1785 slot = path->slots[lowest_level]; replace_path()
1829 path->slots[level]); replace_path()
1831 path->slots[level]); replace_path()
1878 path->slots[level]); replace_path()
1894 path->slots[level], old_bytenr); replace_path()
1896 path->slots[level], old_ptr_gen); replace_path()
1952 while (path->slots[i] + 1 < nritems) { walk_up_reloc_tree()
1953 path->slots[i]++; walk_up_reloc_tree()
1954 if (btrfs_node_ptr_generation(eb, path->slots[i]) <= walk_up_reloc_tree()
1986 while (path->slots[i] < nritems) { walk_down_reloc_tree()
1987 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]); walk_down_reloc_tree()
1990 path->slots[i]++; walk_down_reloc_tree()
1992 if (path->slots[i] >= nritems) { walk_down_reloc_tree()
2003 bytenr = btrfs_node_blockptr(eb, path->slots[i]); walk_down_reloc_tree()
2011 path->slots[i - 1] = 0; walk_down_reloc_tree()
2095 if (path->slots[level] + 1 < find_next_key()
2098 path->slots[level] + 1); find_next_key()
2140 path->slots[level] = 0; merge_reloc_root()
2155 path->slots[level]); merge_reloc_root()
2205 path->slots[level]); merge_reloc_root()
2219 path->slots[level]); merge_reloc_root()
2694 slot = path->slots[upper->level]; do_relocation()
3242 slot = path->slots[0]; get_ref_objectid_v0()
3250 slot = path->slots[0]; get_ref_objectid_v0()
3290 item_size = btrfs_item_size_nr(eb, path->slots[0]); add_tree_block()
3294 ei = btrfs_item_ptr(eb, path->slots[0], add_tree_block()
3382 if (path->slots[0]) { __add_tree_block()
3383 path->slots[0]--; __add_tree_block()
3385 path->slots[0]); __add_tree_block()
3562 path->slots[0] = nritems; find_data_references()
3566 while (path->slots[0] >= nritems) { find_data_references()
3588 path->slots[0] = nritems; find_data_references()
3592 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); find_data_references()
3597 fi = btrfs_item_ptr(leaf, path->slots[0], find_data_references()
3636 path->slots[0] = nritems; find_data_references()
3638 path->slots[0]++; find_data_references()
3666 ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
3667 end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
3700 if (path->slots[0] >= btrfs_header_nritems(eb)) {
3711 btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
3725 dref = btrfs_item_ptr(eb, path->slots[0],
3736 path->slots[0]++;
3778 if (path->slots[0] >= btrfs_header_nritems(leaf)) { find_next_extent()
3785 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); find_next_extent()
3793 path->slots[0]++; find_next_extent()
3799 path->slots[0]++; find_next_extent()
3806 path->slots[0]++; find_next_extent()
3953 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], relocate_block_group()
3955 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); relocate_block_group()
4109 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); __insert_orphan_inode()
4353 if (path->slots[0] == 0) btrfs_recover_relocation()
4355 path->slots[0]--; btrfs_recover_relocation()
4358 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_recover_relocation()
extent-tree.c
436 if (path->slots[0] < nritems) { caching_thread()
437 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); caching_thread()
474 path->slots[0]++; caching_thread()
498 path->slots[0]++; caching_thread()
790 if (path->slots[0]) { btrfs_lookup_extent_info()
791 path->slots[0]--; btrfs_lookup_extent_info()
793 path->slots[0]); btrfs_lookup_extent_info()
803 item_size = btrfs_item_size_nr(leaf, path->slots[0]); btrfs_lookup_extent_info()
805 ei = btrfs_item_ptr(leaf, path->slots[0], btrfs_lookup_extent_info()
813 ei0 = btrfs_item_ptr(leaf, path->slots[0], btrfs_lookup_extent_info()
997 BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0)); convert_extent_item_v0()
999 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); convert_extent_item_v0()
1000 ei0 = btrfs_item_ptr(leaf, path->slots[0], convert_extent_item_v0()
1006 if (path->slots[0] >= btrfs_header_nritems(leaf)) { convert_extent_item_v0()
1014 path->slots[0]); convert_extent_item_v0()
1017 path->slots[0]++; convert_extent_item_v0()
1020 ref0 = btrfs_item_ptr(leaf, path->slots[0], convert_extent_item_v0()
1041 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); convert_extent_item_v0()
1148 if (path->slots[0] >= nritems) { lookup_extent_data_ref()
1160 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); lookup_extent_data_ref()
1165 ref = btrfs_item_ptr(leaf, path->slots[0], lookup_extent_data_ref()
1177 path->slots[0]++; lookup_extent_data_ref()
1215 ref = btrfs_item_ptr(leaf, path->slots[0], insert_extent_data_ref()
1227 ref = btrfs_item_ptr(leaf, path->slots[0], insert_extent_data_ref()
1241 ref = btrfs_item_ptr(leaf, path->slots[0], insert_extent_data_ref()
1275 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); remove_extent_data_ref()
1278 ref1 = btrfs_item_ptr(leaf, path->slots[0], remove_extent_data_ref()
1282 ref2 = btrfs_item_ptr(leaf, path->slots[0], remove_extent_data_ref()
1288 ref0 = btrfs_item_ptr(leaf, path->slots[0], remove_extent_data_ref()
1310 ref0 = btrfs_item_ptr(leaf, path->slots[0], remove_extent_data_ref()
1331 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); extent_data_ref_count()
1342 ref1 = btrfs_item_ptr(leaf, path->slots[0], extent_data_ref_count()
1346 ref2 = btrfs_item_ptr(leaf, path->slots[0], extent_data_ref_count()
1352 ref0 = btrfs_item_ptr(leaf, path->slots[0], extent_data_ref_count()
1442 if (path->slots[level] + 1 >= find_next_key()
1447 path->slots[level] + 1); find_next_key()
1450 path->slots[level] + 1); find_next_key()
1527 if (path->slots[0]) { lookup_inline_extent_backref()
1528 path->slots[0]--; lookup_inline_extent_backref()
1530 path->slots[0]); lookup_inline_extent_backref()
1554 item_size = btrfs_item_size_nr(leaf, path->slots[0]); lookup_inline_extent_backref()
1568 item_size = btrfs_item_size_nr(leaf, path->slots[0]); lookup_inline_extent_backref()
1573 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); lookup_inline_extent_backref()
1680 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); setup_inline_extent_backref()
1688 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); setup_inline_extent_backref()
1696 end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]); setup_inline_extent_backref()
1774 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); update_inline_extent_backref()
1806 item_size = btrfs_item_size_nr(leaf, path->slots[0]); update_inline_extent_backref()
2008 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); __btrfs_inc_extent_ref()
2009 item = btrfs_item_ptr(leaf, path->slots[0], __btrfs_inc_extent_ref()
2026 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); __btrfs_inc_extent_ref()
2027 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); __btrfs_inc_extent_ref()
2170 if (path->slots[0] > 0) { run_delayed_extent_op()
2171 path->slots[0]--; run_delayed_extent_op()
2173 path->slots[0]); run_delayed_extent_op()
2195 item_size = btrfs_item_size_nr(leaf, path->slots[0]); run_delayed_extent_op()
2205 item_size = btrfs_item_size_nr(leaf, path->slots[0]); run_delayed_extent_op()
2209 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); run_delayed_extent_op()
2990 if (path->slots[0] == 0) check_committed_ref()
2993 path->slots[0]--; check_committed_ref()
2995 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); check_committed_ref()
3001 item_size = btrfs_item_size_nr(leaf, path->slots[0]); check_committed_ref()
3008 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); check_committed_ref()
3178 bi = btrfs_item_ptr_offset(leaf, path->slots[0]); write_one_cache_group()
6147 extent_slot = path->slots[0]; __btrfs_free_extent()
6163 if (path->slots[0] - extent_slot > 5) __btrfs_free_extent()
6195 if (ret > 0 && skinny_metadata && path->slots[0]) { __btrfs_free_extent()
6200 path->slots[0]--; __btrfs_free_extent()
6202 path->slots[0]); __btrfs_free_extent()
6230 extent_slot = path->slots[0]; __btrfs_free_extent()
6249 BUG_ON(found_extent || extent_slot != path->slots[0]); __btrfs_free_extent()
6276 extent_slot = path->slots[0]; __btrfs_free_extent()
6332 BUG_ON(path->slots[0] != extent_slot); __btrfs_free_extent()
6334 BUG_ON(path->slots[0] != extent_slot + 1); __btrfs_free_extent()
6335 path->slots[0] = extent_slot; __btrfs_free_extent()
6341 ret = btrfs_del_items(trans, extent_root, path, path->slots[0], __btrfs_free_extent()
7286 extent_item = btrfs_item_ptr(leaf, path->slots[0], alloc_reserved_file_extent()
7369 extent_item = btrfs_item_ptr(leaf, path->slots[0], alloc_reserved_tree_block()
7700 if (path->slots[wc->level] < wc->reada_slot) { reada_walk_down()
7713 for (slot = path->slots[wc->level]; slot < nritems; slot++) { reada_walk_down()
7721 if (slot == path->slots[wc->level]) reada_walk_down()
7805 * nodes which have had all slots visited. If a node (leaf or
7810 * slots incremented to the next position for a search. If we need to
7830 path->slots[level]++; adjust_slots_upwards()
7831 slot = path->slots[level]; adjust_slots_upwards()
7844 path->slots[level] = 0; adjust_slots_upwards()
7859 if (path->slots[root_level] >= btrfs_header_nritems(eb)) adjust_slots_upwards()
7911 path->slots[root_level] = 0; account_shared_subtree()
7924 parent_slot = path->slots[level + 1]; account_shared_subtree()
7935 path->slots[level] = 0; account_shared_subtree()
7962 /* Restart search with new slots */ account_shared_subtree()
8082 path->slots[level]); do_walk_down()
8094 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]); do_walk_down()
8135 path->slots[level]); do_walk_down()
8171 path->slots[level] = 0; do_walk_down()
8242 path->slots[level] = 0; walk_up_proc()
8339 if (path->slots[level] >= walk_down_tree()
8345 path->slots[level]++; walk_down_tree()
8362 path->slots[level] = btrfs_header_nritems(path->nodes[level]); walk_up_tree()
8365 if (path->slots[level] + 1 < walk_up_tree()
8367 path->slots[level]++; walk_up_tree()
8443 path->slots[level] = 0; btrfs_drop_snapshot()
8526 path->slots[level]); btrfs_drop_snapshot()
8674 path->slots[parent_level] = btrfs_header_nritems(parent); btrfs_drop_subtree()
8679 path->slots[level] = 0; btrfs_drop_subtree()
9094 slot = path->slots[0]; find_first_block_group()
9111 path->slots[0]++; find_first_block_group()
9359 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_read_block_groups()
9384 btrfs_item_ptr_offset(leaf, path->slots[0]), btrfs_read_block_groups()
send.c
803 ii = btrfs_item_ptr(path->nodes[0], path->slots[0], __get_inode_info()
862 int slot = path->slots[0]; iterate_inode_ref()
1004 slot = path->slots[0]; iterate_dir_item()
1125 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]); get_inode_path()
1320 fi = btrfs_item_ptr(eb, path->slots[0], find_extent_clone()
1481 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], read_symlink()
1489 len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei); read_symlink()
1726 path->slots[0]); get_first_ref()
1736 iref = btrfs_item_ptr(path->nodes[0], path->slots[0], get_first_ref()
1745 extref = btrfs_item_ptr(path->nodes[0], path->slots[0], get_first_ref()
2318 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); send_subvol_begin()
2324 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); send_subvol_begin()
2478 slot = path->slots[0]; send_utimes()
2618 slot = path->slots[0]; did_create_dir()
2646 path->slots[0]++; did_create_dir()
2885 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { can_rmdir()
2894 path->slots[0]); can_rmdir()
2899 di = btrfs_item_ptr(path->nodes[0], path->slots[0], can_rmdir()
2923 path->slots[0]++; can_rmdir()
4023 slot = path->slots[0]; process_all_refs()
4044 path->slots[0]++; process_all_refs()
4331 slot = path->slots[0]; process_all_new_xattrs()
4355 path->slots[0]++; process_all_new_xattrs()
4621 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], send_write_or_clone()
4626 path->slots[0], ei); send_write_or_clone()
4694 slot = left_path->slots[0]; is_extent_unchanged()
4743 slot = path->slots[0]; is_extent_unchanged()
4806 slot = path->slots[0]; is_extent_unchanged()
4859 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); get_last_extent()
4863 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], get_last_extent()
4868 path->slots[0], fi); get_last_extent()
4898 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], maybe_send_hole()
4903 path->slots[0], fi); maybe_send_hole()
4911 if (path->slots[0] == 0 && maybe_send_hole()
4953 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], process_extent()
5015 slot = path->slots[0]; process_all_extents()
5040 path->slots[0]++; process_all_extents()
5216 sctx->left_path->slots[0], changed_inode()
5222 sctx->right_path->slots[0], changed_inode()
5229 sctx->right_path->slots[0], changed_inode()
5454 item_size = btrfs_item_size_nr(leaf, path->slots[0]); compare_refs()
5455 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); compare_refs()
5558 slot = path->slots[0]; full_send_tree()
inode.c
176 ei = btrfs_item_ptr(leaf, path->slots[0], insert_inline_extent()
1272 if (ret > 0 && path->slots[0] > 0 && check_prev) { run_delalloc_nocow()
1275 path->slots[0] - 1); run_delalloc_nocow()
1278 path->slots[0]--; run_delalloc_nocow()
1283 if (path->slots[0] >= btrfs_header_nritems(leaf)) { run_delalloc_nocow()
1295 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); run_delalloc_nocow()
1301 path->slots[0]++; run_delalloc_nocow()
1314 fi = btrfs_item_ptr(leaf, path->slots[0], run_delalloc_nocow()
1328 path->slots[0]++; run_delalloc_nocow()
1368 path->slots[0], fi); run_delalloc_nocow()
1375 path->slots[0]++; run_delalloc_nocow()
1386 path->slots[0]++; run_delalloc_nocow()
2097 fi = btrfs_item_ptr(leaf, path->slots[0], insert_reserved_file_extent()
2274 slot = path->slots[0]; record_one_backref()
2287 path->slots[0]++; record_one_backref()
2489 extent = btrfs_item_ptr(path->nodes[0], path->slots[0], relink_extent_backref()
2526 path->slots[0]--; relink_extent_backref()
2528 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); relink_extent_backref()
2530 fi = btrfs_item_ptr(leaf, path->slots[0], relink_extent_backref()
2558 item = btrfs_item_ptr(leaf, path->slots[0], relink_extent_backref()
2697 if (ret > 0 && path->slots[0] > 0) record_old_file_extents()
2698 path->slots[0]--; record_old_file_extents()
2712 slot = path->slots[0]; record_old_file_extents()
2759 path->slots[0]++; record_old_file_extents()
3336 if (path->slots[0] == 0) btrfs_orphan_cleanup()
3338 path->slots[0]--; btrfs_orphan_cleanup()
3343 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_orphan_cleanup()
3605 inode_item = btrfs_item_ptr(leaf, path->slots[0], btrfs_read_locked_inode()
3653 path->slots[0]++; btrfs_read_locked_inode()
3655 path->slots[0] >= btrfs_header_nritems(leaf)) btrfs_read_locked_inode()
3658 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); btrfs_read_locked_inode()
3662 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); btrfs_read_locked_inode()
3680 maybe_acls = acls_after_inode_item(leaf, path->slots[0], btrfs_read_locked_inode()
3683 path->slots[0] = first_xattr_slot; btrfs_read_locked_inode()
3803 inode_item = btrfs_item_ptr(leaf, path->slots[0], btrfs_update_inode_item()
4105 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_unlink_subvol()
4190 int slot = path->slots[0]; truncate_inline_extent()
4327 if (path->slots[0] == 0) btrfs_truncate_inode_items()
4329 path->slots[0]--; btrfs_truncate_inode_items()
4335 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_truncate_inode_items()
4346 fi = btrfs_item_ptr(leaf, path->slots[0], btrfs_truncate_inode_items()
4354 path->slots[0], fi); btrfs_truncate_inode_items()
4458 pending_del_slot = path->slots[0]; btrfs_truncate_inode_items()
4461 path->slots[0] + 1 == pending_del_slot) { btrfs_truncate_inode_items()
4464 pending_del_slot = path->slots[0]; btrfs_truncate_inode_items()
4501 if (path->slots[0] == 0 || btrfs_truncate_inode_items()
4502 path->slots[0] != pending_del_slot || btrfs_truncate_inode_items()
4535 path->slots[0]--; btrfs_truncate_inode_items()
5309 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); fixup_tree_root_location()
5701 slot = path->slots[0]; btrfs_real_readdir()
5785 path->slots[0]++; btrfs_real_readdir()
5959 if (path->slots[0] == 0) { btrfs_set_inode_index_count()
5964 path->slots[0]--; btrfs_set_inode_index_count()
5967 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_set_inode_index_count()
6129 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], btrfs_new_inode()
6136 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, btrfs_new_inode()
6648 btrfs_item_nr(path->slots[0])); uncompress_inline()
6740 if (path->slots[0] == 0) btrfs_get_extent()
6742 path->slots[0]--; btrfs_get_extent()
6746 item = btrfs_item_ptr(leaf, path->slots[0], btrfs_get_extent()
6749 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_get_extent()
6771 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); btrfs_get_extent()
6776 path->slots[0]++; btrfs_get_extent()
6777 if (path->slots[0] >= btrfs_header_nritems(leaf)) { btrfs_get_extent()
6787 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); btrfs_get_extent()
6816 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); btrfs_get_extent()
7136 slot = path->slots[0]; can_nocow_extent()
9578 ei = btrfs_item_ptr(leaf, path->slots[0], btrfs_symlink()
inode-map.c
69 slot = path->slots[0]; caching_kthread()
114 path->slots[0]++; caching_kthread()
538 if (path->slots[0] > 0) { btrfs_find_highest_objectid()
539 slot = path->slots[0] - 1; btrfs_find_highest_objectid()
tree-defrag.c
105 path->slots[1] = btrfs_header_nritems(path->nodes[1]); btrfs_defrag_leaves()
/linux-4.1.27/arch/ia64/include/uapi/asm/
rse.h
47 unsigned long slots = (bsp - bspstore); ia64_rse_num_regs() local
49 return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40; ia64_rse_num_regs()
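The ia64_rse_num_regs() hit counts stacked registers between two RSE backing-store pointers: the pointer subtraction bsp - bspstore gives the number of 64-bit slots, and one slot in every 0x40 (64) is a NaT-collection word rather than a register, so that share is subtracted. For example, with bspstore at slot offset 0 within its group and bsp 80 slots beyond it, the result is 80 - (0 + 80)/64 = 79 registers.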
/linux-4.1.27/fs/efs/
dir.c
35 /* each block contains at most 256 slots */ efs_readdir()
60 for (; slot < dirblock->slots; slot++) { efs_readdir()
75 __func__, block, slot, dirblock->slots-1, efs_readdir()
namei.c
47 for (slot = 0; slot < dirblock->slots; slot++) { efs_find_entry()
/linux-4.1.27/arch/arm64/kernel/
hw_breakpoint.c
183 * @slots: pointer to array of slots
184 * @max_slots: max number of slots
193 static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots, hw_breakpoint_slot_setup() argument
201 slot = &slots[i]; hw_breakpoint_slot_setup()
231 struct perf_event **slots; hw_breakpoint_control() local
241 slots = this_cpu_ptr(bp_on_reg); hw_breakpoint_control()
248 slots = this_cpu_ptr(wp_on_reg); hw_breakpoint_control()
253 i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops); hw_breakpoint_control()
545 struct perf_event **slots; toggle_bp_registers() local
549 slots = this_cpu_ptr(bp_on_reg); toggle_bp_registers()
553 slots = this_cpu_ptr(wp_on_reg); toggle_bp_registers()
561 if (!slots[i]) toggle_bp_registers()
564 privilege = counter_arch_bp(slots[i])->ctrl.privilege; toggle_bp_registers()
586 struct perf_event *bp, **slots; breakpoint_handler() local
590 slots = this_cpu_ptr(bp_on_reg); breakpoint_handler()
597 bp = slots[i]; breakpoint_handler()
662 struct perf_event *wp, **slots; watchpoint_handler() local
667 slots = this_cpu_ptr(wp_on_reg); watchpoint_handler()
673 wp = slots[i]; watchpoint_handler()
853 struct perf_event **slots; hw_breakpoint_reset() local
858 * through the slots, which are all empty, hence it just resets control hw_breakpoint_reset()
861 * notifier some slots might be initialized; if so they are hw_breakpoint_reset()
862 * reprogrammed according to the debug slots content. hw_breakpoint_reset()
864 for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) { hw_breakpoint_reset()
865 if (slots[i]) { hw_breakpoint_reset()
866 hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE); hw_breakpoint_reset()
873 for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) { hw_breakpoint_reset()
874 if (slots[i]) { hw_breakpoint_reset()
875 hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE); hw_breakpoint_reset()
/linux-4.1.27/include/linux/input/
H A Dmt.h39 * @num_slots: number of MT slots the device uses
44 * @slots: array of slots holding current values of tracked contacts
53 struct input_mt_slot slots[]; member in struct:input_mt
121 int input_mt_assign_slots(struct input_dev *dev, int *slots,
/linux-4.1.27/drivers/input/touchscreen/
H A Dpenmount.c62 struct mt_slot slots[PM_MAX_MTSLOT]; member in struct:pm
77 pm->slots[i].active); pm_mtevent()
78 if (pm->slots[i].active) { pm_mtevent()
79 input_event(input, EV_ABS, ABS_MT_POSITION_X, pm->slots[i].x); pm_mtevent()
80 input_event(input, EV_ABS, ABS_MT_POSITION_Y, pm->slots[i].y); pm_mtevent()
140 pm->slots[slotnum].active = pm->data[0] & 0x30; pm_parse_3000()
141 pm->slots[slotnum].x = pm->data[2] * 256 + pm->data[1]; pm_parse_3000()
142 pm->slots[slotnum].y = pm->data[4] * 256 + pm->data[3]; pm_parse_3000()
156 pm->slots[slotnum].active = pm->data[0] & 0x40; pm_parse_6250()
157 pm->slots[slotnum].x = pm->data[2] * 256 + pm->data[1]; pm_parse_6250()
158 pm->slots[slotnum].y = pm->data[4] * 256 + pm->data[3]; pm_parse_6250()
H A Dpixcir_i2c_ts.c112 int slots[PIXCIR_MAX_SLOTS]; pixcir_ts_report() local
129 input_mt_assign_slots(ts->input, slots, pos, n, 0); pixcir_ts_report()
143 slot = slots[i]; pixcir_ts_report()
527 dev_err(dev, "Error initializing Multi-Touch slots\n"); pixcir_i2c_ts_probe()
/linux-4.1.27/net/sched/
H A Dsch_sfq.c95 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
120 unsigned int divisor; /* number of slots in hash table */
129 sfq_index *ht; /* Hash table ('divisor' slots) */
130 struct sfq_slot *slots; /* Flows table ('maxflows' entries) */ member in struct:sfq_sched_data
137 /* Linked lists of slots, indexed by depth
155 return &q->slots[val].dep; sfq_dep_head()
228 struct sfq_slot *slot = &q->slots[x]; sfq_link()
243 n = q->slots[x].dep.next; \
244 p = q->slots[x].dep.prev; \
257 d = q->slots[x].qlen--; sfq_dec()
270 d = ++q->slots[x].qlen; sfq_inc()
326 slot = &q->slots[x]; sfq_drop()
342 slot = &q->slots[x]; sfq_drop()
389 slot = &q->slots[x]; sfq_enqueue()
395 slot = &q->slots[x]; sfq_enqueue()
507 /* No active slots */ sfq_dequeue()
513 slot = &q->slots[a]; sfq_dequeue()
530 q->tail = NULL; /* no more active slots */ sfq_dequeue()
567 slot = &q->slots[i]; sfq_rehash()
585 slot = &q->slots[x]; sfq_rehash()
596 slot = &q->slots[x]; sfq_rehash()
728 sfq_free(q->slots); sfq_destroy()
764 q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows); sfq_init()
765 if (!q->ht || !q->slots) { sfq_init()
773 slot_queue_init(&q->slots[i]); sfq_init()
868 const struct sfq_slot *slot = &q->slots[idx]; sfq_dump_class_stats()
H A Dsch_qfq.c94 * Maximum number of consecutive slots occupied by backlogged classes
177 unsigned long full_slots; /* non-empty slots */
180 struct hlist_head slots[QFQ_MAX_SLOTS]; member in struct:qfq_group
852 * this would cause non-empty slots to be right-shifted by one
918 hlist_add_head(&agg->next, &grp->slots[i]); qfq_slot_insert()
925 return hlist_entry(grp->slots[grp->front].first, qfq_slot_head()
938 if (hlist_empty(&grp->slots[grp->front])) qfq_front_slot_remove()
969 * move the objects. The mask of occupied slots must be shifted
1360 if (hlist_empty(&grp->slots[i])) qfq_slot_remove()
1403 } else if (hlist_empty(&grp->slots[grp->front])) { qfq_deactivate_agg()
1462 len = qfq_drop_from_slot(q, &grp->slots[j]); qfq_drop()
1502 INIT_HLIST_HEAD(&grp->slots[j]); qfq_init_qdisc()
/linux-4.1.27/drivers/dma/ioat/
H A Ddca.c392 int slots = 0; ioat2_dca_count_dca_slots() local
400 req = readl(iobase + global_req_table + (slots * sizeof(u32))); ioat2_dca_count_dca_slots()
401 slots++; ioat2_dca_count_dca_slots()
404 return slots; ioat2_dca_count_dca_slots()
411 int slots; ioat2_dca_init() local
427 slots = ioat2_dca_count_dca_slots(iobase, dca_offset); ioat2_dca_init()
428 if (slots == 0) ioat2_dca_init()
433 + (sizeof(struct ioat_dca_slot) * slots)); ioat2_dca_init()
440 ioatdca->max_requesters = slots; ioat2_dca_init()
588 int slots = 0; ioat3_dca_count_dca_slots() local
597 req = readl(iobase + global_req_table + (slots * sizeof(u32))); ioat3_dca_count_dca_slots()
598 slots++; ioat3_dca_count_dca_slots()
601 return slots; ioat3_dca_count_dca_slots()
625 int slots; ioat3_dca_init() local
648 slots = ioat3_dca_count_dca_slots(iobase, dca_offset); ioat3_dca_init()
649 if (slots == 0) ioat3_dca_init()
654 + (sizeof(struct ioat_dca_slot) * slots)); ioat3_dca_init()
661 ioatdca->max_requesters = slots; ioat3_dca_init()
/linux-4.1.27/sound/soc/blackfin/
H A Dbf5xx-i2s.c54 unsigned int slots; member in struct:bf5xx_i2s_port
219 unsigned int rx_mask, int slots, int width) bf5xx_i2s_set_tdm_slot()
224 if (slots % 8 != 0 || slots > 8) bf5xx_i2s_set_tdm_slot()
230 bf5xx_i2s->slots = slots; bf5xx_i2s_set_tdm_slot()
234 bf5xx_i2s->tx_dma_data.tdm_mode = slots != 0; bf5xx_i2s_set_tdm_slot()
235 bf5xx_i2s->rx_dma_data.tdm_mode = slots != 0; bf5xx_i2s_set_tdm_slot()
237 return sport_set_multichannel(sport_handle, slots, tx_mask, rx_mask, 0); bf5xx_i2s_set_tdm_slot()
276 return sport_set_multichannel(sport_handle, bf5xx_i2s->slots, bf5xx_i2s_resume()
218 bf5xx_i2s_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int width) bf5xx_i2s_set_tdm_slot() argument
/linux-4.1.27/arch/cris/arch-v32/mach-a3/
H A Darbiter.c5 * The algorithm first assigns slots to the clients that have specified
6 * bandwidth (e.g. ethernet) and then the remaining slots are divided
121 * (memory arbiter slots, that is)
124 * Program the memory arbiter slots for "region" according to what's
128 * number of slots, free to hand out to any client.
138 * This vector corresponds to the hardware arbiter slots (see crisv32_arbiter_config()
151 /* Allocate the requested non-zero number of slots, but crisv32_arbiter_config()
155 * first to get to any spare slots, else those slots crisv32_arbiter_config()
172 * free slots. crisv32_arbiter_config()
200 * Allocate remaining slots in round-robin crisv32_arbiter_config()
247 * "fixed scheme" for unclaimed slots. Though, if for some crisv32_arbiter_init()
311 * We make sure that there are enough slots only for non-zero crisv32_arbiter_allocate_bandwidth()
312 * requests. Requesting 0 bandwidth *may* allocate slots, crisv32_arbiter_allocate_bandwidth()
339 * slots will just be unused. However, handing out those unused slots
341 * would give unclaimed slots to an eager low-index client.
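The arbiter.c comments above describe a two-phase split: clients that asked for bandwidth get slots first, and whatever remains is dealt out round-robin. A small sketch of that split under assumed numbers (64 slots per round, four clients, invented shares):

#include <stdio.h>

#define NR_SLOTS   64           /* arbiter time slots per round; illustrative */
#define NR_CLIENTS 4

int main(void)
{
        int share[NR_CLIENTS] = { 50, 25, 0, 0 };       /* requested % of bandwidth */
        int given[NR_CLIENTS] = { 0, 0, 0, 0 };
        int c, used = 0;

        /* Phase 1: clients that asked for bandwidth get their share of slots. */
        for (c = 0; c < NR_CLIENTS; c++) {
                given[c] = NR_SLOTS * share[c] / 100;
                used += given[c];
        }
        /* Phase 2: hand the unclaimed slots out round-robin. */
        for (c = 0; used < NR_SLOTS; c = (c + 1) % NR_CLIENTS) {
                given[c]++;
                used++;
        }
        for (c = 0; c < NR_CLIENTS; c++)
                printf("client %d: %d slots\n", c, given[c]);
        return 0;
}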
/linux-4.1.27/include/sound/
H A Dsimple_card.h20 int slots; member in struct:asoc_simple_dai
H A Dmixer_oss.h56 struct snd_mixer_oss_slot slots[SNDRV_OSS_MAX_MIXERS]; /* OSS mixer slots */ member in struct:snd_mixer_oss
H A Dsoc-dai.h114 unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width);
145 int (*xlate_tdm_slot_mask)(unsigned int slots,
149 int slots, int slot_width);
/linux-4.1.27/lib/
H A Dassoc_array.c58 ptr = ACCESS_ONCE(node->slots[slot]); assoc_array_subtree_iterate()
92 ptr = ACCESS_ONCE(node->slots[slot]); assoc_array_subtree_iterate()
223 ptr = ACCESS_ONCE(node->slots[slot]); assoc_array_walk()
340 ptr = ACCESS_ONCE(node->slots[slot]); assoc_array_find()
399 struct assoc_array_ptr *ptr = node->slots[slot]; assoc_array_destroy_subtree()
479 edit->leaf_p = &new_n0->slots[0]; assoc_array_insert_in_empty_tree()
522 ptr = node->slots[i]; assoc_array_insert_into_terminal_node()
531 edit->leaf_p = &node->slots[i]; assoc_array_insert_into_terminal_node()
532 edit->dead_leaf = node->slots[i]; assoc_array_insert_into_terminal_node()
543 edit->leaf_p = &node->slots[free_slot]; assoc_array_insert_into_terminal_node()
549 /* The node has no spare slots - so we're either going to have to split assoc_array_insert_into_terminal_node()
566 pr_devel("no spare slots\n"); assoc_array_insert_into_terminal_node()
569 ptr = node->slots[i]; assoc_array_insert_into_terminal_node()
636 * remaining leaf slots, we now have N+1 leaves to go in them. assoc_array_insert_into_terminal_node()
655 if (assoc_array_ptr_is_meta(node->slots[i])) assoc_array_insert_into_terminal_node()
656 new_n0->slots[i] = node->slots[i]; assoc_array_insert_into_terminal_node()
658 new_n0->slots[i] = NULL; assoc_array_insert_into_terminal_node()
659 BUG_ON(new_n0->slots[slot] != NULL); assoc_array_insert_into_terminal_node()
660 new_n0->slots[slot] = assoc_array_node_to_ptr(new_n1); assoc_array_insert_into_terminal_node()
666 if (assoc_array_ptr_is_meta(node->slots[i])) assoc_array_insert_into_terminal_node()
669 new_n1->slots[next_slot++] = node->slots[i]; assoc_array_insert_into_terminal_node()
674 } while (new_n0->slots[free_slot] != NULL); assoc_array_insert_into_terminal_node()
675 new_n0->slots[free_slot] = node->slots[i]; assoc_array_insert_into_terminal_node()
684 } while (new_n0->slots[free_slot] != NULL); assoc_array_insert_into_terminal_node()
685 edit->leaf_p = &new_n0->slots[free_slot]; assoc_array_insert_into_terminal_node()
688 edit->leaf_p = &new_n1->slots[next_slot++]; assoc_array_insert_into_terminal_node()
697 ptr = node->slots[i]; assoc_array_insert_into_terminal_node()
713 edit->set[0].ptr = &assoc_array_ptr_to_node(ptr)->slots[node->parent_slot]; assoc_array_insert_into_terminal_node()
736 new_n1->slots[i] = node->slots[i]; assoc_array_insert_into_terminal_node()
738 new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0); assoc_array_insert_into_terminal_node()
739 edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]]; assoc_array_insert_into_terminal_node()
741 edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot]; assoc_array_insert_into_terminal_node()
765 int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]), assoc_array_insert_into_terminal_node()
809 ptr = node->slots[i]; assoc_array_insert_into_terminal_node()
860 edit->set[0].ptr = &node->slots[shortcut->parent_slot]; assoc_array_insert_mid_shortcut()
945 new_n0->slots[sc_slot] = assoc_array_shortcut_to_ptr(new_s1); assoc_array_insert_mid_shortcut()
960 new_n0->slots[sc_slot] = shortcut->next_node; assoc_array_insert_mid_shortcut()
969 edit->leaf_p = &new_n0->slots[1]; assoc_array_insert_mid_shortcut()
971 edit->leaf_p = &new_n0->slots[0]; assoc_array_insert_mid_shortcut()
1088 collapse->node->slots[collapse->slot++] = assoc_array_leaf_to_ptr(leaf); assoc_array_delete_collapse_iterator()
1141 ptr = node->slots[slot]; assoc_array_delete()
1162 edit->dead_leaf = node->slots[slot]; assoc_array_delete()
1163 edit->set[0].ptr = &node->slots[slot]; assoc_array_delete()
1199 ptr = node->slots[i]; assoc_array_delete()
1268 edit->set[1].ptr = &p->slots[node->parent_slot]; assoc_array_delete()
1553 ptr = node->slots[slot]; assoc_array_gc()
1563 new_n->slots[slot] = ptr; assoc_array_gc()
1567 new_ptr_pp = &new_n->slots[slot]; assoc_array_gc()
1574 /* Count up the number of empty slots in this node and work out the assoc_array_gc()
1580 ptr = new_n->slots[slot]; assoc_array_gc()
1594 ptr = new_n->slots[slot]; assoc_array_gc()
1618 new_n->slots[slot] = NULL; assoc_array_gc()
1623 struct assoc_array_ptr *p = child->slots[i]; assoc_array_gc()
1627 while (new_n->slots[next_slot]) assoc_array_gc()
1630 new_n->slots[next_slot++] = p; assoc_array_gc()
1648 if ((ptr = new_n->slots[slot])) assoc_array_gc()
1686 new_n->slots[slot] = ptr; assoc_array_gc()
1716 n->slots[slot] = assoc_array_node_to_ptr(new_n); assoc_array_gc()
H A Dradix-tree.c231 node->slots[0] = NULL; radix_tree_node_rcu_free()
360 node->slots[0] = slot; radix_tree_extend()
413 rcu_assign_pointer(node->slots[offset], slot); __radix_tree_create()
423 slot = node->slots[offset]; __radix_tree_create()
431 *slotp = node ? node->slots + offset : (void **)&root->rnode; __radix_tree_create()
517 slot = node->slots + ((index >> shift) & RADIX_TREE_MAP_MASK); __radix_tree_lookup()
605 slot = slot->slots[offset]; radix_tree_tag_set()
655 slot = slot->slots[offset]; radix_tree_tag_clear()
732 node = rcu_dereference_raw(node->slots[offset]); radix_tree_tag_get()
740 * radix_tree_next_chunk - find next chunk of slots for iteration
795 !node->slots[offset]) { radix_tree_next_chunk()
807 if (node->slots[offset]) radix_tree_next_chunk()
823 node = rcu_dereference_raw(node->slots[offset]); radix_tree_next_chunk()
852 return node->slots + offset; radix_tree_next_chunk()
918 if (!slot->slots[offset]) radix_tree_range_tag_if_tagged()
926 slot = slot->slots[offset]; radix_tree_range_tag_if_tagged()
1041 * their slots at *@results and returns the number of items which were
1125 * have the tag indexed by @tag set. Places the slots at *@results and
1126 * returns the number of slots which were placed at *@results.
1168 if (slot->slots[i] != NULL) __locate()
1180 slot = rcu_dereference_raw(slot->slots[i]); __locate()
1187 if (slot->slots[i] == item) { __locate()
1266 if (!to_free->slots[0]) radix_tree_shrink()
1273 * (to_free->slots[0]), it will be safe to dereference the new radix_tree_shrink()
1276 slot = to_free->slots[0]; radix_tree_shrink()
1303 *((unsigned long *)&to_free->slots[0]) |= radix_tree_shrink()
1343 parent->slots[offset] = NULL; __radix_tree_delete_node()
1404 node->slots[offset] = NULL; radix_tree_delete_item()
/linux-4.1.27/drivers/gpu/drm/atmel-hlcdc/
H A Datmel_hlcdc_layer.c74 slot = &upd->slots[id]; atmel_hlcdc_layer_update_reset()
101 slot = &upd->slots[upd->pending]; atmel_hlcdc_layer_update_apply()
370 slot = &upd->slots[upd->next]; atmel_hlcdc_layer_update_start()
397 upd->slots[upd->pending].configs, atmel_hlcdc_layer_update_start()
400 upd->slots[upd->pending].updated_configs, atmel_hlcdc_layer_update_start()
404 slot->fb_flip->fb = upd->slots[upd->pending].fb_flip->fb; atmel_hlcdc_layer_update_start()
405 if (upd->slots[upd->pending].fb_flip->fb) { atmel_hlcdc_layer_update_start()
407 upd->slots[upd->pending].fb_flip->fb; atmel_hlcdc_layer_update_start()
409 upd->slots[upd->pending].fb_flip->ngems; atmel_hlcdc_layer_update_start()
416 upd->slots[upd->next].configs, atmel_hlcdc_layer_update_start()
454 slot = &upd->slots[upd->next]; atmel_hlcdc_layer_update_set_fb()
489 slot = &upd->slots[upd->next]; atmel_hlcdc_layer_update_cfg()
505 slot = &upd->slots[upd->next]; atmel_hlcdc_layer_update_commit()
588 upd->slots[i].updated_configs = buffer; atmel_hlcdc_layer_update_init()
590 upd->slots[i].configs = buffer; atmel_hlcdc_layer_update_init()
/linux-4.1.27/drivers/mmc/host/
H A Dsdhci-pci.h86 struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */ member in struct:sdhci_pci_chip
H A Dsdhci-pci.c124 * slots number is fixed here for MRST as SDIO3/5 are never used and mrst_hc_probe()
524 jmicron_enable_mmc(chip->slots[i]->host, 0); jmicron_suspend()
537 jmicron_enable_mmc(chip->slots[i]->host, 1); jmicron_resume()
1238 slot = chip->slots[i]; sdhci_pci_suspend()
1272 sdhci_resume_host(chip->slots[i]->host); sdhci_pci_suspend()
1294 slot = chip->slots[i]; sdhci_pci_resume()
1318 slot = chip->slots[i]; sdhci_pci_runtime_suspend()
1338 sdhci_runtime_resume_host(chip->slots[i]->host); sdhci_pci_runtime_suspend()
1360 slot = chip->slots[i]; sdhci_pci_runtime_resume()
1588 u8 slots, first_bar; sdhci_pci_probe() local
1597 ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); sdhci_pci_probe()
1601 slots = PCI_SLOT_INFO_SLOTS(slots) + 1; sdhci_pci_probe()
1602 dev_dbg(&pdev->dev, "found %d slot(s)\n", slots); sdhci_pci_probe()
1603 if (slots == 0) sdhci_pci_probe()
1606 BUG_ON(slots > MAX_SLOTS); sdhci_pci_probe()
1636 chip->num_slots = slots; sdhci_pci_probe()
1646 slots = chip->num_slots; /* Quirk may have changed this */ sdhci_pci_probe()
1648 for (i = 0; i < slots; i++) { sdhci_pci_probe()
1652 sdhci_pci_remove_slot(chip->slots[i]); sdhci_pci_probe()
1657 chip->slots[i] = slot; sdhci_pci_probe()
1686 sdhci_pci_remove_slot(chip->slots[i]); sdhci_pci_remove()
H A Domap.c160 struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS]; member in struct:mmc_omap_host
280 if (host->slots[i] == NULL || host->slots[i]->mrq == NULL) mmc_omap_release_slot()
284 new_slot = host->slots[i]; mmc_omap_release_slot()
732 dev_info(mmc_dev(host->slots[0]->mmc), mmc_omap_irq()
861 struct mmc_omap_slot *slot = host->slots[num]; omap_mmc_notify_cover_event()
866 if (host->nr_slots == 0 || !host->slots[num]) omap_mmc_notify_cover_event()
1235 slot->pdata = &host->pdata->slots[id]; mmc_omap_new_slot()
1237 host->slots[id] = slot; mmc_omap_new_slot()
1240 if (host->pdata->slots[id].wires >= 4) mmc_omap_new_slot()
1333 dev_err(&pdev->dev, "no slots\n"); mmc_omap_probe()
1366 host->features = host->pdata->slots[0].features; mmc_omap_probe()
1429 mmc_omap_remove_slot(host->slots[i]); mmc_omap_probe()
1464 mmc_omap_remove_slot(host->slots[i]); mmc_omap_remove()
/linux-4.1.27/arch/mips/include/asm/sn/sn0/
H A Darch.h60 #define MAX_MEM_SLOTS 16 /* max slots per node */
62 #define MAX_MEM_SLOTS 32 /* max slots per node */
/linux-4.1.27/kernel/events/
H A Dhw_breakpoint.c145 fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, fetch_bp_busy_slots() argument
161 if (nr > slots->pinned) for_each_cpu()
162 slots->pinned = nr; for_each_cpu()
165 if (nr > slots->flexible) for_each_cpu()
166 slots->flexible = nr; for_each_cpu()
176 fetch_this_slot(struct bp_busy_slots *slots, int weight) fetch_this_slot() argument
178 slots->pinned += weight; fetch_this_slot()
282 struct bp_busy_slots slots = {0}; __reserve_bp_slot() local
298 fetch_bp_busy_slots(&slots, bp, type); __reserve_bp_slot()
303 fetch_this_slot(&slots, weight); __reserve_bp_slot()
306 if (slots.pinned + (!!slots.flexible) > nr_slots[type]) __reserve_bp_slot()
348 * Allow the kernel debugger to reserve breakpoint slots without
350 * release breakpoint slots.
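The __reserve_bp_slot() hits above boil down to one admission test: take the busiest CPU's pinned count, add the weight of the new breakpoint, keep one slot back if any flexible (per-task) breakpoints exist, and refuse if that exceeds the hardware slot count. A sketch of just that test, with an invented hardware slot count:

#include <stdio.h>

struct bp_busy_slots {
        unsigned int pinned;    /* worst-case pinned breakpoints on any CPU */
        unsigned int flexible;  /* worst-case flexible (per-task) breakpoints */
};

/*
 * Same shape as the check visible above: after adding the new
 * breakpoint's weight to the pinned count, one slot is reserved for
 * flexible breakpoints if any exist.  nr_hw_slots is illustrative,
 * not read from the hardware.
 */
static int reserve_bp_slot(struct bp_busy_slots *slots, int weight,
                           unsigned int nr_hw_slots)
{
        slots->pinned += weight;

        if (slots->pinned + (slots->flexible ? 1 : 0) > nr_hw_slots)
                return -1;      /* the kernel returns -ENOSPC here */
        return 0;
}

int main(void)
{
        struct bp_busy_slots busy = { .pinned = 3, .flexible = 1 };

        printf("%d\n", reserve_bp_slot(&busy, 1, 4));   /* 4 pinned + 1 flexible > 4 -> -1 */
        return 0;
}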
/linux-4.1.27/include/linux/platform_data/
H A Dedma.h23 * more than twice as many slots as event channels.
44 /* PaRAM slots are laid out like this */
114 /* alloc/free DMA channels and their dedicated parameter RAM slots */
120 /* alloc/free parameter RAM slots */
124 /* alloc/free a set of contiguous parameter RAM slots */
H A Dmmc-omap.h19 /* number of slots per controller */
120 } slots[OMAP_MMC_MAX_SLOTS]; member in struct:omap_mmc_platform_data
/linux-4.1.27/drivers/pci/
H A Dslot.c200 list_for_each_entry(slot, &parent->slots, list) get_slot()
213 * @name: user visible string presented in /sys/bus/pci/slots/<name>
216 * PCI slots have first class attributes such as address, speed, width,
224 * name to multiple slots. Workaround these broken platforms by renaming
225 * the slots on behalf of the caller. If firmware assigns name N to
226 * multiple slots:
233 * Placeholder slots:
241 * may be many slots with @slot_nr of -1. The other change in semantics is
302 list_add(&slot->list, &parent->slots); pci_create_slot()
388 pci_slots_kset = kset_create_and_add("slots", NULL, pci_slot_init()
/linux-4.1.27/arch/arm/mach-omap2/
H A Dboard-n8x0.c171 * The two MMC slots are multiplexed via Menelaus companion chip over I2C.
172 * On N800, both slots are powered via Menelaus. On N810, only one of the
173 * slots is powered via Menelaus. The N810 EMMC is powered via GPIO.
438 * MMC controller1 has two slots that are multiplexed via I2C.
448 .slots[0] = {
457 .slots[1] = {
483 mmc1_data.slots[0].name = "external"; n8x0_mmc_init()
491 mmc1_data.slots[1].name = "internal"; n8x0_mmc_init()
492 mmc1_data.slots[1].ban_openended = 1; n8x0_mmc_init()
/linux-4.1.27/drivers/pci/hotplug/
H A Dibmphp_ebda.c80 struct ebda_hpc_slot *slots; alloc_ebda_hpc() local
87 slots = kcalloc(slot_count, sizeof(struct ebda_hpc_slot), GFP_KERNEL); alloc_ebda_hpc()
88 if (!slots) alloc_ebda_hpc()
90 controller->slots = slots; alloc_ebda_hpc()
99 kfree(controller->slots); alloc_ebda_hpc()
108 kfree (controller->slots); free_ebda_hpc()
212 debug ("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num); print_ebda_hpc()
213 debug ("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num); print_ebda_hpc()
214 debug ("%s - index into ctlr addr: %x\n", __func__, hpc_ptr->slots[index].ctl_index); print_ebda_hpc()
215 debug ("%s - cap of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_cap); print_ebda_hpc()
589 /* This routine will find out how many slots are in the chassis, so that
789 debug ("count of slots controlled by this ctlr: %x\n", slot_num); ebda_rsrc_controller()
793 slot_ptr = hpc_ptr->slots; ebda_rsrc_controller()
901 hpc_ptr->starting_slot_num = hpc_ptr->slots[0].slot_num; ebda_rsrc_controller()
902 hpc_ptr->ending_slot_num = hpc_ptr->slots[slot_num-1].slot_num; ebda_rsrc_controller()
904 // register slots with hpc core as well as create linked list of ibm slot ebda_rsrc_controller()
927 tmp_slot->capabilities = hpc_ptr->slots[index].slot_cap; ebda_rsrc_controller()
928 if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_133_MAX) == EBDA_SLOT_133_MAX) ebda_rsrc_controller()
930 else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_100_MAX) == EBDA_SLOT_100_MAX) ebda_rsrc_controller()
932 else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_66_MAX) == EBDA_SLOT_66_MAX) ebda_rsrc_controller()
935 if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_PCIX_CAP) == EBDA_SLOT_PCIX_CAP) ebda_rsrc_controller()
941 tmp_slot->bus = hpc_ptr->slots[index].slot_bus_num; ebda_rsrc_controller()
943 bus_info_ptr1 = ibmphp_find_same_bus_num (hpc_ptr->slots[index].slot_bus_num); ebda_rsrc_controller()
953 tmp_slot->ctlr_index = hpc_ptr->slots[index].ctl_index; ebda_rsrc_controller()
954 tmp_slot->number = hpc_ptr->slots[index].slot_num; ebda_rsrc_controller()
1090 * - the total number of the slots based on each bus
H A Dcpci_hotplug_core.c58 static int slots; variable
291 slots++; cpci_hp_register_bus()
314 if (!slots) { cpci_hp_unregister_bus()
321 slots--; cpci_hp_unregister_bus()
371 if (!slots) { init_slots()
403 if (!slots) { check_slots()
405 err("no slots registered, shutting down"); check_slots()
526 dbg("%s - error checking slots", __func__); event_thread()
558 dbg("%s - error checking slots", __func__); poll_thread()
630 * Unregister all of our slots with the pci_hotplug subsystem, cleanup_slots()
634 if (!slots) cleanup_slots()
H A Dibmphp_core.c63 * tables don't provide default info for empty slots */
124 /* sometimes the hot-pluggable slots start with 4 (not always from 1) */ get_max_slots()
157 if ((*cur_slot)->number == rtable->slots[loop].slot && ibmphp_init_devno()
158 (*cur_slot)->bus == rtable->slots[loop].bus) { ibmphp_init_devno()
159 (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn); ibmphp_init_devno()
175 debug("rtable->slots[loop].irq[0].bitmap = %x\n", ibmphp_init_devno()
176 rtable->slots[loop].irq[0].bitmap); ibmphp_init_devno()
177 debug("rtable->slots[loop].irq[1].bitmap = %x\n", ibmphp_init_devno()
178 rtable->slots[loop].irq[1].bitmap); ibmphp_init_devno()
179 debug("rtable->slots[loop].irq[2].bitmap = %x\n", ibmphp_init_devno()
180 rtable->slots[loop].irq[2].bitmap); ibmphp_init_devno()
181 debug("rtable->slots[loop].irq[3].bitmap = %x\n", ibmphp_init_devno()
182 rtable->slots[loop].irq[3].bitmap); ibmphp_init_devno()
184 debug("rtable->slots[loop].irq[0].link = %x\n", ibmphp_init_devno()
185 rtable->slots[loop].irq[0].link); ibmphp_init_devno()
186 debug("rtable->slots[loop].irq[1].link = %x\n", ibmphp_init_devno()
187 rtable->slots[loop].irq[1].link); ibmphp_init_devno()
188 debug("rtable->slots[loop].irq[2].link = %x\n", ibmphp_init_devno()
189 rtable->slots[loop].irq[2].link); ibmphp_init_devno()
190 debug("rtable->slots[loop].irq[3].link = %x\n", ibmphp_init_devno()
191 rtable->slots[loop].irq[3].link); ibmphp_init_devno()
498 * function. It will also power off empty slots that are powered on since BIOS
1297 debug("after slots\n"); ibmphp_unload()
H A Drpaphp_core.c270 * for built-in pci slots (even when the built-in slots are
296 * during boot time, if the hotplug slots are present at boot time,
302 * slots cannot be hotplugged.
363 * Unregister all of our slots with the pci_hotplug subsystem, cleanup_slots()
H A Dacpiphp_glue.c166 list_for_each_entry_safe(slot, next, &bridge->slots, node) { free_bridge()
312 list_for_each_entry(slot, &bridge->slots, node) acpiphp_add_context()
328 list_add_tail(&slot->node, &bridge->slots); acpiphp_add_context()
331 * Expose slots to user space for functions that have _EJ0 or _RMV or acpiphp_add_context()
334 * expose slots to user space in those cases. acpiphp_add_context()
377 list_for_each_entry(slot, &bridge->slots, node) { cleanup_bridge()
668 * Iterate over all slots under this bridge and make sure that if a
679 list_for_each_entry(slot, &bridge->slots, node) { acpiphp_check_bridge()
819 * acpiphp_enumerate_slots - Enumerate PCI slots for a given bus.
820 * @bus: PCI bus to enumerate the slots for.
846 INIT_LIST_HEAD(&bridge->slots); acpiphp_enumerate_slots()
897 acpi_handle_err(handle, "failed to register slots\n"); acpiphp_enumerate_slots()
H A Dpcihp_skeleton.c241 * replace this if your hardware provides a better way to name slots. make_slot_name()
329 * Unregister all of our slots with the pci_hotplug subsystem. cleanup_slots()
348 * determining the number of slots you have in the system pcihp_skel_init()
H A Dcpqphp_core.c146 /* Loop through slots */ init_SERR()
186 tbus = cpqhp_routing_table->slots[loop].bus; pci_print_IRQ_route()
187 tdevice = cpqhp_routing_table->slots[loop].devfn; pci_print_IRQ_route()
188 tslot = cpqhp_routing_table->slots[loop].slot; pci_print_IRQ_route()
348 tbus = cpqhp_routing_table->slots[loop].bus; get_slot_mapping()
349 tdevice = cpqhp_routing_table->slots[loop].devfn >> 3; get_slot_mapping()
350 tslot = cpqhp_routing_table->slots[loop].slot; get_slot_mapping()
1232 /* turn off empty slots here unless command line option "ON" set cpqhpc_probe()
1251 /* We have to save the presence info for these slots */ cpqhpc_probe()
/linux-4.1.27/fs/nfs/
H A Dnfs4session.h10 /* maximum number of slots to use */
35 struct nfs4_slot *slots; /* seqid per slot */ member in struct:nfs4_slot_table
39 u32 max_slots; /* # slots in table */
H A Dnfs4session.c35 * nfs4_shrink_slot_table - free retired slots from the slot table
43 p = &tbl->slots; nfs4_shrink_slot_table()
121 p = &tbl->slots; nfs4_find_or_create_slot()
191 p = &tbl->slots; nfs4_reset_slot_table()
227 dprintk("%s: tbl=%p slots=%p max_slots=%u\n", __func__, nfs4_realloc_slot_table()
228 tbl, tbl->slots, tbl->max_slots); nfs4_realloc_slot_table()
366 /* Deallocate slots */ nfs41_set_server_slotid_locked()
459 if (status && tbl->slots == NULL) nfs4_setup_session_slot_tables()
H A Dcallback.h206 * of slots for the backchannel.
/linux-4.1.27/drivers/eisa/
H A Dvirtual_root.c41 .slots = EISA_MAX_SLOTS,
H A Dpci_eisa.c56 pci_eisa_root.slots = EISA_MAX_SLOTS; pci_eisa_init()
/linux-4.1.27/arch/tile/include/gxio/
H A Ddma_queue.h72 /* Reserve slots in the queue, optionally waiting for slots to become
83 * Try to reserve 'num' egress command slots. We do this by __gxio_dma_queue_reserve()
131 * If any of our slots mod 256 were equivalent to 0, go ahead and __gxio_dma_queue_reserve()
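A rough, generic restatement of the reserve-or-wait idea in the __gxio_dma_queue_reserve() comments above: slots become free as the hardware retires commands, so a reservation of num slots succeeds only if the outstanding count leaves that much room. The counter layout below is an assumption made for illustration; the real driver packs its state into atomics and also handles waiting and wrap-around.

#include <stdint.h>
#include <stdio.h>

struct dma_queue {
        uint64_t posted;        /* slots handed out to callers so far */
        uint64_t completed;     /* slots the (simulated) hardware has retired */
        unsigned int size;      /* total command slots in the ring */
};

static long dma_queue_reserve(struct dma_queue *q, unsigned int num)
{
        if (q->size - (q->posted - q->completed) < num)
                return -1;                      /* caller may poll completions and retry */

        q->posted += num;
        return (long)(q->posted - num);         /* index of the first reserved slot */
}

int main(void)
{
        struct dma_queue q = { .posted = 0, .completed = 0, .size = 256 };

        printf("%ld\n", dma_queue_reserve(&q, 16));     /* 0 */
        printf("%ld\n", dma_queue_reserve(&q, 250));    /* -1: only 240 slots free */
        return 0;
}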
/linux-4.1.27/arch/mips/sni/
H A Deisa.c28 .slots = EISA_MAX_SLOTS,
/linux-4.1.27/arch/arm/include/asm/hardware/
H A Diop_adma.h58 * @all_slots: complete domain of slots usable by the channel
82 * @slot_cnt: total slots used in a transaction (group of operations)
83 * @slots_per_op: number of slots per operation
87 * @group_list: list of slots that make up a multi-descriptor transaction
/linux-4.1.27/fs/fat/
H A Ddir.c28 * Maximum buffer size of unicode chars from slots.
29 * [(max longname slots * 13 (size in a slot) + nul) * sizeof(wchar_t)]
284 unsigned char id, slot, slots, alias_checksum; fat_parse_long() local
294 slots = 0; fat_parse_long()
299 slots = id & ~0x40; fat_parse_long()
300 if (slots > 20 || !slots) /* ceil(256 * 2 / 26) */ fat_parse_long()
302 *nr_slots = slots; fat_parse_long()
305 slot = slots; fat_parse_long()
1057 * Second stage: remove the remaining longname slots. fat_remove_entries()
1064 "Couldn't remove the long name slots"); fat_remove_entries()
1152 /* filling the new directory slots ("." and ".." entries) */ fat_alloc_new_dir()
1189 static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots, fat_add_new_entries() argument
1202 * size is 32*slots (672bytes). So, iff the cluster size is fat_add_new_entries()
1231 memcpy(bhs[n]->b_data, slots, copy); fat_add_new_entries()
1232 slots += copy; fat_add_new_entries()
1269 int fat_add_entries(struct inode *dir, void *slots, int nr_slots, fat_add_entries() argument
1274 struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */ fat_add_entries()
1324 * NOTE: If this slots has shortname, first, we write fat_add_entries()
1325 * the long name slots, then write the short name. fat_add_entries()
1331 /* Fill the long name slots. */ fat_add_entries()
1334 memcpy(bhs[i]->b_data + offset, slots, copy); fat_add_entries()
1337 slots += copy; fat_add_entries()
1345 memcpy(bhs[i]->b_data + offset, slots, copy); fat_add_entries()
1364 cluster = fat_add_new_entries(dir, slots, nr_slots, &nr_cluster, fat_add_entries()
H A Dnamei_vfat.c581 struct msdos_dir_slot *slots, int *nr_slots) vfat_build_slots()
615 de = (struct msdos_dir_entry *)slots; vfat_build_slots()
624 for (ps = slots, i = *nr_slots; i > 0; i--, ps++) { vfat_build_slots()
635 slots[0].id |= 0x40; vfat_build_slots()
659 struct msdos_dir_slot *slots; vfat_add_entry() local
667 slots = kmalloc(sizeof(*slots) * MSDOS_SLOTS, GFP_NOFS); vfat_add_entry()
668 if (slots == NULL) vfat_add_entry()
672 slots, &nr_slots); vfat_add_entry()
676 err = fat_add_entries(dir, slots, nr_slots, sinfo); vfat_add_entry()
687 kfree(slots); vfat_add_entry()
578 vfat_build_slots(struct inode *dir, const unsigned char *name, int len, int is_dir, int cluster, struct timespec *ts, struct msdos_dir_slot *slots, int *nr_slots) vfat_build_slots() argument
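The fat_parse_long() and vfat_build_slots() hits above revolve around two small rules: each long-name slot carries 13 UTF-16 characters, and the sequence byte of the slot stored first on disk has 0x40 set with the slot count (1..20) in the low bits. A sketch of both rules; the helper names are invented:

#include <stdio.h>

#define CHARS_PER_SLOT 13       /* UTF-16 code units stored in one long-name slot */
#define MAX_LFN_SLOTS  20

/* Number of long-name slots a name of 'len' characters occupies on disk. */
static int lfn_slots(int len)
{
        return (len + CHARS_PER_SLOT - 1) / CHARS_PER_SLOT;
}

/*
 * Same check as in fat_parse_long() above: the first slot on disk marks
 * itself with 0x40 as the last (highest-numbered) piece of the name,
 * and the remaining bits give the slot count.
 */
static int lfn_parse_id(unsigned char id, int *nr_slots)
{
        if (!(id & 0x40))
                return -1;              /* not the start of a long-name run */
        *nr_slots = id & ~0x40;
        if (*nr_slots == 0 || *nr_slots > MAX_LFN_SLOTS)
                return -1;
        return 0;
}

int main(void)
{
        int n;

        printf("%d\n", lfn_slots(255));                 /* 20 slots */
        printf("%d %d\n", lfn_parse_id(0x43, &n), n);   /* 0 3: a 3-slot name */
        return 0;
}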
/linux-4.1.27/sound/soc/ux500/
H A Dux500_msp_dai.h57 int slots; member in struct:ux500_msp_i2s_drvdata
H A Dmop500_ab8500.c210 /* Reset slots configuration to default(s) */ mop500_ab8500_shutdown()
225 int channels, ret = 0, driver_mode, slots; mop500_ab8500_hw_params() local
297 /* Setup TDM-slots */ mop500_ab8500_hw_params()
302 slots = 16; mop500_ab8500_hw_params()
307 slots = 16; mop500_ab8500_hw_params()
312 slots = 16; mop500_ab8500_hw_params()
327 ret = snd_soc_dai_set_tdm_slot(cpu_dai, tx_slots, rx_slots, slots, mop500_ab8500_hw_params()
334 ret = snd_soc_dai_set_tdm_slot(codec_dai, tx_slots, rx_slots, slots, mop500_ab8500_hw_params()
H A Dux500_msp_dai.c40 if (drvdata->slots > 1) { setup_pcm_multichan()
59 __func__, drvdata->slots, multi->tx_channel_0_enable, setup_pcm_multichan()
71 switch (drvdata->slots) { setup_frameper()
115 "%s: Error: Unsupported slot-count (slots = %d)!\n", setup_frameper()
116 __func__, drvdata->slots); setup_frameper()
138 switch (drvdata->slots) { setup_pcm_framing()
156 "%s: Error: Unsupported slot-count (slots = %d)!\n", setup_pcm_framing()
157 __func__, drvdata->slots); setup_pcm_framing()
523 dev_dbg(dai->dev, "TDM-slots active: %d", slots_active); ux500_msp_dai_hw_params()
584 int slots, int slot_width) ux500_msp_dai_set_tdm_slot()
589 switch (slots) { ux500_msp_dai_set_tdm_slot()
604 __func__, slots); ux500_msp_dai_set_tdm_slot()
607 drvdata->slots = slots; ux500_msp_dai_set_tdm_slot()
760 drvdata->slots = 1; ux500_msp_drv_probe()
581 ux500_msp_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) ux500_msp_dai_set_tdm_slot() argument
/linux-4.1.27/include/linux/
H A DmISDNdsp.h32 int pcm_slots; /* number of slots on the pcm bus */
H A Dipack.h170 * @slots: number of slots available
177 int slots; member in struct:ipack_bus_device
186 * @slots: number of slots available in the bus device.
192 struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
H A Dpagevec.h58 * Add a page to a pagevec. Returns the number of slots still available.
H A Datmel-mci.h22 * Note that support for multiple slots is experimental -- some cards
H A Dradix-tree.h102 void __rcu *slots[RADIX_TREE_MAP_SIZE]; member in struct:radix_tree_node
316 * This radix tree iterator works in terms of "chunks" of slots. A chunk is a
317 * subinterval of slots contained within one radix tree leaf node. It is
320 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
330 #define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */
357 * radix_tree_next_chunk - find next chunk of slots for iteration
461 * radix_tree_for_each_chunk_slot - iterate over slots in one chunk
474 * radix_tree_for_each_slot - iterate over non-empty slots
489 * radix_tree_for_each_contig - iterate over contiguous slots
506 * radix_tree_for_each_tagged - iterate over tagged slots
H A Dkvm_host.h437 #define kvm_for_each_memslot(memslot, slots) \
438 for (memslot = &slots->memslots[0]; \
439 memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
484 id_to_memslot(struct kvm_memslots *slots, int id) id_to_memslot() argument
486 int index = slots->id_to_index[id]; id_to_memslot()
489 slot = &slots->memslots[index]; id_to_memslot()
813 search_memslots(struct kvm_memslots *slots, gfn_t gfn) search_memslots() argument
815 int start = 0, end = slots->used_slots; search_memslots()
816 int slot = atomic_read(&slots->lru_slot); search_memslots()
817 struct kvm_memory_slot *memslots = slots->memslots; search_memslots()
834 atomic_set(&slots->lru_slot, start); search_memslots()
842 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) __gfn_to_memslot() argument
844 return search_memslots(slots, gfn); __gfn_to_memslot()
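search_memslots() above probes a cached lru slot first and otherwise binary-searches the array, which this kernel keeps sorted by descending base_gfn. A self-contained restatement of that lookup, minus the atomics around the cached index; the bounds check on 'start' is only needed because this sketch uses an exactly-sized array:

#include <stdio.h>
#include <stddef.h>

typedef unsigned long long gfn_t;

struct memslot {
        gfn_t base_gfn;
        unsigned long npages;
};

static struct memslot *search_memslots(struct memslot *memslots, int used,
                                       int *lru_slot, gfn_t gfn)
{
        int start = 0, end = used;
        int slot = *lru_slot;

        /* Fast path: the last slot we hit often covers the next gfn too. */
        if (gfn >= memslots[slot].base_gfn &&
            gfn < memslots[slot].base_gfn + memslots[slot].npages)
                return &memslots[slot];

        /* Binary search over slots sorted by descending base_gfn. */
        while (start < end) {
                slot = start + (end - start) / 2;

                if (gfn >= memslots[slot].base_gfn)
                        end = slot;
                else
                        start = slot + 1;
        }

        if (start < used &&
            gfn >= memslots[start].base_gfn &&
            gfn < memslots[start].base_gfn + memslots[start].npages) {
                *lru_slot = start;              /* re-cache the hit */
                return &memslots[start];
        }
        return NULL;
}

int main(void)
{
        struct memslot slots[] = {              /* descending base_gfn */
                { .base_gfn = 0x100, .npages = 0x40 },
                { .base_gfn = 0x10,  .npages = 0x20 },
        };
        int lru = 0;
        struct memslot *s = search_memslots(slots, 2, &lru, 0x25);

        printf("%llx\n", s ? s->base_gfn : 0);  /* 10 */
        return 0;
}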
H A Dassoc_array_priv.h21 #define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */
59 struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT]; member in struct:assoc_array_node
H A Dcb710.h40 unsigned slots; member in struct:cb710_chip
45 /* NOTE: cb710_chip.slots is modified only during device init/exit and
H A Dasn1_ber_bytecode.h32 /* The tag-matching ops come first and the odd-numbered slots
H A Deisa.h96 int slots; /* Max slot number */ member in struct:eisa_root_device
/linux-4.1.27/drivers/ipack/carriers/
H A Dtpci200.c156 slot_irq = rcu_dereference(tpci200->slots[i].irq); tpci200_interrupt()
182 if (tpci200->slots[dev->slot].irq == NULL) { tpci200_free_irq()
188 slot_irq = tpci200->slots[dev->slot].irq; tpci200_free_irq()
190 RCU_INIT_POINTER(tpci200->slots[dev->slot].irq, NULL); tpci200_free_irq()
211 if (tpci200->slots[dev->slot].irq != NULL) { tpci200_request_irq()
239 rcu_assign_pointer(tpci200->slots[dev->slot].irq, slot_irq); tpci200_request_irq()
443 kfree(tpci200->slots); tpci200_uninstall()
460 tpci200->slots = kzalloc( tpci200_install()
462 if (tpci200->slots == NULL) tpci200_install()
467 kfree(tpci200->slots); tpci200_install()
468 tpci200->slots = NULL; tpci200_install()
/linux-4.1.27/drivers/gpu/drm/omapdrm/
H A Dtcm.h73 s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
154 * @param slots Number of (contiguous) slots to reserve.
163 static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots, tcm_reserve_1d() argument
168 (area == NULL || slots == 0) ? -EINVAL : tcm_reserve_1d()
169 slots > (tcm->width * (u32) tcm->height) ? -ENOMEM : 0; tcm_reserve_1d()
173 res = tcm->reserve_1d(tcm, slots, area); tcm_reserve_1d()
285 /* calculate number of slots in an area */ __tcm_sizeof()
H A Dtcm-sita.h62 * Statistics on immediately neighboring slots. Edge is the number of
/linux-4.1.27/drivers/misc/cb710/
H A Dcore.c77 for (nr = chip->slots; nr; ++slot, --nr) { cb710_irq_handler()
102 int nr = chip->slots; cb710_register_slot()
112 ++chip->slots; cb710_register_slot()
133 --chip->slots; cb710_register_slot()
145 int nr = chip->slots - 1; cb710_unregister_slot()
157 --chip->slots; cb710_unregister_slot()
/linux-4.1.27/arch/x86/kvm/
H A Diommu.c154 struct kvm_memslots *slots; kvm_iommu_map_memslots() local
161 slots = kvm_memslots(kvm); kvm_iommu_map_memslots()
163 kvm_for_each_memslot(memslot, slots) { kvm_for_each_memslot()
322 struct kvm_memslots *slots; kvm_iommu_unmap_memslots() local
326 slots = kvm_memslots(kvm); kvm_iommu_unmap_memslots()
328 kvm_for_each_memslot(memslot, slots) kvm_iommu_unmap_memslots()
/linux-4.1.27/arch/x86/boot/compressed/
H A Daslr.c209 static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / variable
215 /* Overflowing the slots list should be impossible. */ slots_append()
220 slots[slot_max++] = addr; slots_append()
225 /* Handle case of no slots stored. */ slots_fetch_random()
229 return slots[get_random_long() % slot_max]; slots_fetch_random()
290 /* Verify potential e820 positions, appending to slots list. */ find_random_addr()
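The KASLR hits above keep a flat slots[] array of candidate load addresses: slots_append() collects positions that passed the e820 checks, and slots_fetch_random() picks one uniformly, returning 0 when nothing was stored. A userspace sketch of the same pair, with an illustrative capacity and rand() standing in for get_random_long():

#include <stdio.h>
#include <stdlib.h>

#define MAX_SLOTS 64            /* capacity is illustrative, not the kernel's */

static unsigned long slots[MAX_SLOTS];
static unsigned int slot_max;

/* Collect a candidate load address, refusing silently when full. */
static void slots_append(unsigned long addr)
{
        if (slot_max >= MAX_SLOTS)
                return;
        slots[slot_max++] = addr;
}

/* Pick one stored candidate at random; 0 means no usable slot was found. */
static unsigned long slots_fetch_random(void)
{
        if (slot_max == 0)
                return 0;
        return slots[rand() % slot_max];
}

int main(void)
{
        unsigned long a;

        for (a = 0x1000000; a < 0x1400000; a += 0x200000)
                slots_append(a);                /* pretend these passed the e820 checks */
        printf("%#lx\n", slots_fetch_random());
        return 0;
}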
/linux-4.1.27/drivers/gpu/host1x/
H A Dcdma.c108 * Pop a number of two word slots from the push buffer
111 static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots) host1x_pushbuffer_pop() argument
114 pb->fence = (pb->fence + slots * 8) & (pb->size_bytes - 1); host1x_pushbuffer_pop()
118 * Return the number of two word slots free in the push buffer
208 * - pop their push buffer slots
247 /* Pop push buffer slots */ update_cdma_locked()
303 * still in the current context (slots are also NOP-ed). host1x_cdma_update_sync_queue()
307 * the order (slots are modified to be a GATHER of syncpt incrs). host1x_cdma_update_sync_queue()
311 * modified NOP-ed PB slots). This lets things appear to have completed host1x_cdma_update_sync_queue()
458 * Kick off DMA, add job to the sync queue, and a number of slots to be freed
H A Dcdma.h72 unsigned int slots_used; /* pb slots used in current submit */
73 unsigned int slots_free; /* pb slots free in current submit */
/linux-4.1.27/sound/soc/fsl/
H A Dfsl_esai.c40 * @slots: number of slots
59 u32 slots; member in struct:fsl_esai
93 dev_dbg(&pdev->dev, "isr: Transmitting even slots\n"); esai_isr()
105 dev_dbg(&pdev->dev, "isr: Receiving even slots\n"); esai_isr()
346 u32 rx_mask, int slots, int slot_width) fsl_esai_set_dai_tdm_slot()
351 ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots)); fsl_esai_set_dai_tdm_slot()
359 ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots)); fsl_esai_set_dai_tdm_slot()
367 esai_priv->slots = slots; fsl_esai_set_dai_tdm_slot()
515 u32 pins = DIV_ROUND_UP(channels, esai_priv->slots); fsl_esai_hw_params()
524 bclk = params_rate(params) * slot_width * esai_priv->slots; fsl_esai_hw_params()
576 u32 pins = DIV_ROUND_UP(channels, esai_priv->slots); fsl_esai_trigger()
789 esai_priv->slots = 2; fsl_esai_probe()
345 fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask, u32 rx_mask, int slots, int slot_width) fsl_esai_set_dai_tdm_slot() argument
H A Dmpc5200_psc_ac97.c192 psc_dma->slots |= s->ac97_slot_bits; psc_ac97_trigger()
193 out_be32(&psc_dma->psc_regs->ac97_slots, psc_dma->slots); psc_ac97_trigger()
201 psc_dma->slots &= ~(s->ac97_slot_bits); psc_ac97_trigger()
202 out_be32(&psc_dma->psc_regs->ac97_slots, psc_dma->slots); psc_ac97_trigger()
314 /* No slots active */ psc_ac97_of_probe()
H A Dimx-ssi.c17 * one FIFO which combines all valid receive slots. We cannot even select
18 * which slots we want to receive. The WM9712 with which this driver
58 * SSI Network Mode or TDM slots configuration.
62 unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) imx_ssi_set_dai_tdm_slot()
69 sccr |= SSI_STCCR_DC(slots - 1); imx_ssi_set_dai_tdm_slot()
74 sccr |= SSI_STCCR_DC(slots - 1); imx_ssi_set_dai_tdm_slot()
61 imx_ssi_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) imx_ssi_set_dai_tdm_slot() argument
H A Dmpc5200_dma.h62 unsigned int slots; member in struct:psc_dma
/linux-4.1.27/arch/arm/mach-omap1/
H A Ddevices.c114 if (mmc_controller->slots[0].wires == 4 && !cpu_is_omap7xx()) { omap1_mmc_mux()
117 if (!mmc_controller->slots[0].nomux) omap1_mmc_mux()
125 if (!mmc_controller->slots[1].nomux) { omap1_mmc_mux()
130 if (mmc_controller->slots[1].wires == 4) { omap1_mmc_mux()
181 data->slots[0].features = MMC_OMAP7XX; omap_mmc_add()
183 data->slots[0].features = MMC_OMAP15XX; omap_mmc_add()
185 data->slots[0].features = MMC_OMAP16XX; omap_mmc_add()
H A Dboard-h2-mmc.c56 .slots[0] = {
H A Dboard-h3-mmc.c38 .slots[0] = {
H A Dboard-sx1-mmc.c46 .slots[0] = {
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dradeon_dp_mst.c65 int slots) radeon_dp_mst_set_stream_attrib()
78 val = NI_DP_MSE_SAT_SLOT_COUNT0(slots) | NI_DP_MSE_SAT_SRC0(fe); radeon_dp_mst_set_stream_attrib()
131 new_attribs[idx].slots = drm_dp_mst_get_vcpi_slots(&mst_conn->mst_mgr, mst_enc->port); radeon_dp_mst_update_stream_attribs()
137 new_attribs[i].slots != mst_conn->cur_stream_attribs[i].slots) { radeon_dp_mst_update_stream_attribs()
138 radeon_dp_mst_set_stream_attrib(primary, i, new_attribs[i].fe, new_attribs[i].slots); radeon_dp_mst_update_stream_attribs()
140 mst_conn->cur_stream_attribs[i].slots = new_attribs[i].slots; radeon_dp_mst_update_stream_attribs()
147 mst_conn->cur_stream_attribs[i].slots = 0; radeon_dp_mst_update_stream_attribs()
391 int ret, slots; radeon_mst_encoder_dpms() local
446 mst_enc->pbn, &slots); radeon_mst_encoder_dpms()
454 radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0); radeon_mst_encoder_dpms()
780 radeon_connector->cur_stream_attribs[i].slots); radeon_debugfs_mst_info()
62 radeon_dp_mst_set_stream_attrib(struct radeon_encoder *primary, int stream_number, int fe, int slots) radeon_dp_mst_set_stream_attrib() argument
/linux-4.1.27/fs/dlm/
H A Dmember.c95 log_rinfo(ls, "generation %u slots %d%s", gen, num_slots, line); log_slots()
162 /* for any nodes that do not support slots, we will not have set memb->slot
164 assign slots or set ls_num_slots here */
192 /* node doesn't support slots */ dlm_slots_assign()
227 /* fill in slots (offsets) that are used */ dlm_slots_assign()
244 /* assign new slots from unused offsets */ dlm_slots_assign()
488 struct dlm_slot *slots; dlm_lsop_recover_done() local
496 slots = kzalloc(num * sizeof(struct dlm_slot), GFP_KERNEL); dlm_lsop_recover_done()
497 if (!slots) dlm_lsop_recover_done()
506 slots[i].nodeid = memb->nodeid; dlm_lsop_recover_done()
507 slots[i].slot = memb->slot; dlm_lsop_recover_done()
511 ls->ls_ops->recover_done(ls->ls_ops_arg, slots, num, dlm_lsop_recover_done()
514 kfree(slots); dlm_lsop_recover_done()
/linux-4.1.27/fs/jfs/
H A Djfs_dtree.h41 * additional segments/slots linked vi next field;
76 /* compute number of slots for entry */
135 /* compute number of slots for entry */
176 * contiguous slots at slot specified by stblindex,
203 u8 maxslot; /* 1: number of slots in page slot[] */
/linux-4.1.27/scripts/kconfig/
H A Dqconf.h71 public slots:
231 public slots:
259 public slots:
288 public slots:
309 public slots:
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
H A Dmci.h116 u16 T; /* Voice: Tvoice, HID: Tsniff, in slots */
117 u8 W; /* Voice: Wvoice, HID: Sniff timeout, in slots */
118 u8 A; /* HID: Sniff attempt, in slots */
/linux-4.1.27/drivers/net/wireless/b43legacy/
H A Ddma.h133 /* Number of descriptor slots in the ring. */
135 /* Number of used descriptor slots. */
158 /* Maximum number of used slots. */
H A Dpio.h62 /* Used packet slots in the device internal TX buffer. */
/linux-4.1.27/drivers/media/dvb-core/
H A Ddvb_ca_en50221.h49 * called for different slots concurrently and need to use locks where
61 /* Functions for controlling slots */
121 * @param slot_count Number of slots supported.
/linux-4.1.27/sound/arm/
H A Daaci.c388 unsigned int mask = 1 << 0, slots; aaci_rule_channels() local
391 slots = aaci->ac97_bus->pcms[0].r[0].slots; aaci_rule_channels()
392 if (slots & (1 << AC97_SLOT_PCM_SLEFT)) { aaci_rule_channels()
394 if (slots & (1 << AC97_SLOT_LFE)) aaci_rule_channels()
432 if (aacirun->pcm->r[1].slots) aaci_pcm_open()
529 aacirun->pcm->r[dbl].slots); aaci_pcm_hw_params()
794 .slots = (1 << AC97_SLOT_PCM_LEFT) |
802 .slots = (1 << AC97_SLOT_PCM_LEFT) |
814 .slots = (1 << AC97_SLOT_PCM_LEFT) |
824 .slots = (1 << AC97_SLOT_MIC),
957 * Enable the channel, but don't assign it to any slots, so aaci_size_fifo()
/linux-4.1.27/drivers/char/agp/
H A Disoch.c224 /* Figure the number of isochronous and asynchronous RQ slots the
232 dev_err(&td->dev, "number of request queue slots "
241 * well as the total number of leftover isochronous RQ slots. */
246 /* Distribute the extra RQ slots calculated above and write our
280 * This function basically allocates request queue slots among the
282 * pretty stupid, divide the total number of RQ slots provided by the
283 * target by ndevs. Distribute this many slots to each AGP 3.0 device,
284 * giving any left over slots to the last device in dev_list.
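The isoch.c comment above states the policy outright: divide the target's request-queue slots evenly across the AGP 3.0 devices and give any remainder to the last device in the list. A few lines showing that arithmetic on made-up numbers:

#include <stdio.h>

int main(void)
{
        int total_rq = 32, ndevs = 5;           /* illustrative values */
        int per_dev = total_rq / ndevs;
        int leftover = total_rq - per_dev * ndevs;
        int i;

        /* Every device gets per_dev slots; the last one absorbs the remainder. */
        for (i = 0; i < ndevs; i++)
                printf("dev %d: %d RQ slots\n", i,
                       per_dev + (i == ndevs - 1 ? leftover : 0));
        return 0;
}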
/linux-4.1.27/arch/mips/include/asm/dec/
H A Dkn05.h28 * decoder. Certain address ranges within the "high" 16 slots are
30 * Others are handled locally. "Low" slots are always passed.
/linux-4.1.27/include/uapi/linux/dvb/
H A Dca.h56 unsigned int slot_num; /* total number of CA card and module slots */
58 unsigned int descr_num; /* total number of descrambler slots (keys) */
/linux-4.1.27/include/uapi/linux/hsi/
H A Dcs-protocol.h71 __u32 rx_bufs; /* number of RX buffer slots */
72 __u32 tx_bufs; /* number of TX buffer slots */
/linux-4.1.27/mm/
H A Dworkingset.c82 * 2. Moving one inactive page N page slots towards the tail of the
89 * the number of page slots on the inactive list.
106 * slots in the cache were available:
116 * had (R - E) more page slots, the page would not have been evicted
290 * On 64-bit with 7 radix_tree_nodes per page and 64 slots count_shadow_nodes()
349 if (node->slots[i]) { shadow_lru_isolate()
350 BUG_ON(!radix_tree_exceptional_entry(node->slots[i])); shadow_lru_isolate()
351 node->slots[i] = NULL; shadow_lru_isolate()
/linux-4.1.27/security/selinux/ss/
H A Davtab.h21 * Tuned number of hash slots for avtab to reduce memory usage
58 u32 nslot; /* number of hash slots */
H A Dhashtab.h23 u32 size; /* number of slots in hash table */
/linux-4.1.27/drivers/net/ethernet/apm/xgene/
H A Dxgene_enet_main.c36 for (i = 0; i < buf_pool->slots; i++) { xgene_enet_init_bufpool()
55 u32 slots = buf_pool->slots - 1; xgene_enet_refill_bufpool() local
82 tail = (tail + 1) & slots; xgene_enet_refill_bufpool()
119 u32 slots = buf_pool->slots - 1; xgene_enet_delete_bufpool() local
126 tail = (tail - 1) & slots; xgene_enet_delete_bufpool()
281 tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1); xgene_enet_start_xmit()
368 u16 slots = ring->slots - 1; xgene_enet_process_ring() local
384 head = (head + 1) & slots; xgene_enet_process_ring()
674 netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n", xgene_enet_create_desc_ring()
675 ring->num, ring->size, ring->id, ring->slots); xgene_enet_create_desc_ring()
725 buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, xgene_enet_create_desc_rings()
762 cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, xgene_enet_create_desc_rings()
771 pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; xgene_enet_create_desc_rings()
772 pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2; xgene_enet_create_desc_rings()
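The xgene ring hits above advance head and tail with '& (slots - 1)': when the slot count is a power of two, that mask replaces a modulo. A tiny sketch of the wrap:

#include <stdio.h>

/* Advance a ring index; 'slots' must be a power of two for the mask to work. */
static unsigned int ring_next(unsigned int idx, unsigned int slots)
{
        return (idx + 1) & (slots - 1);
}

int main(void)
{
        unsigned int slots = 8, tail = 6;
        int i;

        for (i = 0; i < 4; i++) {
                printf("%u ", tail);            /* prints: 6 7 0 1 */
                tail = ring_next(tail, slots);
        }
        printf("\n");
        return 0;
}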
/linux-4.1.27/arch/arm/kernel/
H A Dhw_breakpoint.c337 struct perf_event **slot, **slots; arch_install_hw_breakpoint() local
348 slots = this_cpu_ptr(bp_on_reg); arch_install_hw_breakpoint()
354 slots = this_cpu_ptr(wp_on_reg); arch_install_hw_breakpoint()
359 slot = &slots[i]; arch_install_hw_breakpoint()
394 struct perf_event **slot, **slots; arch_uninstall_hw_breakpoint() local
400 slots = this_cpu_ptr(bp_on_reg); arch_uninstall_hw_breakpoint()
405 slots = this_cpu_ptr(wp_on_reg); arch_uninstall_hw_breakpoint()
411 slot = &slots[i]; arch_uninstall_hw_breakpoint()
697 struct perf_event *wp, **slots; watchpoint_handler() local
701 slots = this_cpu_ptr(wp_on_reg); watchpoint_handler()
706 wp = slots[i]; watchpoint_handler()
769 struct perf_event *wp, **slots; watchpoint_single_step_handler() local
772 slots = this_cpu_ptr(wp_on_reg); watchpoint_single_step_handler()
777 wp = slots[i]; watchpoint_single_step_handler()
802 struct perf_event *bp, **slots; breakpoint_handler() local
806 slots = this_cpu_ptr(bp_on_reg); breakpoint_handler()
815 bp = slots[i]; breakpoint_handler()
H A Dprocess.c346 unsigned int slots; sigpage_addr() local
360 slots = ((last - first) >> PAGE_SHIFT) + 1; sigpage_addr()
362 offset = get_random_int() % slots; sigpage_addr()
/linux-4.1.27/sound/core/oss/
H A Dmixer_oss.c148 pslot = &mixer->slots[chn]; snd_mixer_oss_devmask()
164 pslot = &mixer->slots[chn]; snd_mixer_oss_stereodevs()
184 pslot = &mixer->slots[chn]; snd_mixer_oss_recmask()
209 pslot = &mixer->slots[chn]; snd_mixer_oss_get_recsrc()
239 pslot = &mixer->slots[chn]; snd_mixer_oss_set_recsrc()
247 pslot = &mixer->slots[chn]; snd_mixer_oss_set_recsrc()
267 pslot = &mixer->slots[slot]; snd_mixer_oss_get_volume()
295 pslot = &mixer->slots[slot]; snd_mixer_oss_set_volume()
827 pslot = &mixer->slots[idx]; snd_mixer_oss_get_recsrc2()
876 pslot = &mixer->slots[idx]; snd_mixer_oss_put_recsrc2()
1037 if (mixer->slots[ptr->oss_id].get_volume && ! replace_old) snd_mixer_oss_build_input()
1092 rslot = &mixer->slots[ptr->oss_id]; snd_mixer_oss_build_input()
1162 p = (struct slot *)mixer->slots[i].private_data; snd_mixer_oss_proc_read()
1198 mixer_slot_clear(&mixer->slots[ch]); snd_mixer_oss_proc_write()
1209 slot = (struct slot *)mixer->slots[ch].private_data; snd_mixer_oss_proc_write()
1332 struct snd_mixer_oss_slot *chn = &mixer->slots[idx]; snd_mixer_oss_free1()
1373 mixer->slots[idx].number = idx; snd_mixer_oss_notify_handler()
/linux-4.1.27/drivers/usb/isp1760/
H A Disp1760-hcd.c721 struct isp1760_slotinfo *slots, start_bus_transfer()
730 WARN_ON(slots[slot].qtd); start_bus_transfer()
731 WARN_ON(slots[slot].qh); start_bus_transfer()
747 slots[slot].timestamp = jiffies; start_bus_transfer()
748 slots[slot].qtd = qtd; start_bus_transfer()
749 slots[slot].qh = qh; start_bus_transfer()
836 struct isp1760_slotinfo *slots; enqueue_qtds() local
854 slots = priv->int_slots; enqueue_qtds()
857 slots = priv->atl_slots; enqueue_qtds()
862 if ((free_slot == -1) && (slots[curr_slot].qtd == NULL)) enqueue_qtds()
864 if (slots[curr_slot].qh == qh) enqueue_qtds()
900 slots, qtd, qh, &ptd); enqueue_qtds()
958 * and PTD ATL slots are available.
960 * and PTD INT slots are available.
962 * and PTD ATL slots are available.
1076 struct isp1760_slotinfo *slots; handle_done_ptds() local
1094 slots = priv->int_slots; handle_done_ptds()
1097 if (!slots[slot].qh) { handle_done_ptds()
1104 slots[slot].qtd->urb); handle_done_ptds()
1109 slots = priv->atl_slots; handle_done_ptds()
1112 if (!slots[slot].qh) { handle_done_ptds()
1119 slots[slot].qtd->urb); handle_done_ptds()
1122 qtd = slots[slot].qtd; handle_done_ptds()
1123 slots[slot].qtd = NULL; handle_done_ptds()
1124 qh = slots[slot].qh; handle_done_ptds()
1125 slots[slot].qh = NULL; handle_done_ptds()
1186 if (slots == priv->int_slots) { handle_done_ptds()
1198 start_bus_transfer(hcd, ptd_offset, slot, slots, qtd, handle_done_ptds()
1252 * finds active ATL slots which are older than SLOT_TIMEOUT ms, it checks the
720 start_bus_transfer(struct usb_hcd *hcd, u32 ptd_offset, int slot, struct isp1760_slotinfo *slots, struct isp1760_qtd *qtd, struct isp1760_qh *qh, struct ptd *ptd) start_bus_transfer() argument
/linux-4.1.27/drivers/dma/ppc4xx/
H A Dadma.h82 * @all_slots: complete domain of slots usable by the channel
129 * @group_list: list of slots that make up a multi-descriptor transaction
135 * @slot_cnt: total slots used in a transaction (group of operations)
138 * @slots_per_op: number of slots per operation
/linux-4.1.27/arch/x86/include/asm/
H A Dlinkage.h17 * stack slots for temporaries, since they are live and "used"
H A Dpage_64_types.h35 * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
/linux-4.1.27/arch/sparc/include/asm/
H A Dpci_64.h27 /* PCI 64-bit addressing works for all slots on all controller
H A Dsbi.h55 /* intr_state has 4 bits for slots 0 .. 3 and these bits are repeated for each sbus irq level
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnouveau_dma.c137 nv50_dma_wait(struct nouveau_channel *chan, int slots, int count) nv50_dma_wait() argument
142 ret = nv50_dma_push_wait(chan, slots + 1); nv50_dma_wait()
180 nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size) nouveau_dma_wait() argument
186 return nv50_dma_wait(chan, slots, size); nouveau_dma_wait()
/linux-4.1.27/drivers/clk/rockchip/
H A Dclk-rockchip.c69 /* ignore empty slots */ rk2928_gate_clk_init()
/linux-4.1.27/arch/mips/dec/
H A Dtc.c37 * the slot space base address and the number of slots.
/linux-4.1.27/include/linux/mtd/
H A Dspear_smi.h19 /* max possible slots for serial-nor flash chip in the SMI controller */
/linux-4.1.27/arch/mips/pci/
H A Dfixup-ip32.c38 * in theory have slots 4 and 5, and we never normally use the shared
H A Dfixup-pmcmsp.c49 /* Garibaldi Board IRQ wiring to PCI slots */
88 /* MSP7120 Eval Board IRQ wiring to PCI slots */
/linux-4.1.27/arch/mn10300/unit-asb2305/
H A Dpci-asb2305.h66 struct irq_info slots[0]; member in struct:irq_routing_table
/linux-4.1.27/arch/m68k/include/asm/
H A Dlinkage.h12 * stack slots for temporaries, since they are live and "used"
/linux-4.1.27/arch/arm/mach-mv78xx0/
H A Ddb78x00-bp-setup.c82 * #0, and let CPU core #1 have the four x1 slots. db78x00_pci_init()
H A Dbuffalo-wxl-setup.c134 * #0, and let CPU core #1 have the four x1 slots. wxl_pci_init()
/linux-4.1.27/arch/arc/include/uapi/asm/
H A Dptrace.h47 long efa; /* break pt addr, for break points in delay slots */
/linux-4.1.27/arch/tile/include/hv/
H A Ddrv_xgbe_impl.h106 * This queue has 10 slots, and thus can hold 9 packets (_last_packet_plus_one
108 * there are valid, unread packets in slots 2, 3, 4, and 5. The remaining
109 * slots are invalid (do not contain a packet).
171 * This queue has 10 slots, and thus can hold 9 buffers (_last_buffer = 9).
173 * valid, unread buffers in slots 2, 1, 0, 9, 8, and 7. The requested write
175 * will be placed in slots 6 and 5 when they arrive. Finally, the remaining
176 * slots are invalid (do not contain a buffer).
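The drv_xgbe_impl.h comments above note that a 10-slot queue holds only 9 packets. The usual reason for that convention is that letting the producer fill the last free slot would make "write == read" mean both completely full and completely empty; keeping one slot unused removes the ambiguity. A sketch of a ring that enforces it:

#include <stdio.h>

#define SLOTS 10                /* matches the 10-slot example in the comment above */

static unsigned int q_write, q_read;            /* slot indexes, 0..SLOTS-1 */

/* Refuse to advance write onto read, so at most SLOTS-1 entries are stored. */
static int q_push(void)
{
        unsigned int next = (q_write + 1) % SLOTS;

        if (next == q_read)
                return -1;                      /* full */
        q_write = next;
        return 0;
}

int main(void)
{
        int stored = 0;

        while (q_push() == 0)
                stored++;
        printf("%d\n", stored);                 /* prints 9 */
        return 0;
}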
/linux-4.1.27/sound/oss/
H A Duart401.c298 hw_config->slots[4] = -1; probe_uart401()
377 hw_config->slots[4] = devc->my_dev; probe_uart401()
398 int n=hw_config->slots[4]; unload_uart401()
406 devc = midi_devs[hw_config->slots[4]]->devc; unload_uart401()
420 sound_unload_mididev(hw_config->slots[4]); unload_uart401()
/linux-4.1.27/arch/arm/common/
H A Dedma.c236 /* actual number of DMA channels and slots on this silicon */
274 /* dummy param set used to (re)initialize parameter RAM slots */
548 * if we run out parameter RAM slots, i.e we do find a set reserve_contiguous_slots()
549 * of contiguous parameter RAM slots but do not find the exact number reserve_contiguous_slots()
550 * requested as we may reach the total number of parameter RAM slots reserve_contiguous_slots()
614 /* Resource alloc/free: dma channels, parameter RAM slots */
760 * slots may be allocated on behalf of DSP firmware.
823 * edma_alloc_cont_slots- alloc contiguous parameter RAM slots
825 * contiguous parameter RAM slots that have been requested
829 * @count: number of contiguous Parameter RAM slots
834 * contiguous Parameter RAM slots from parameter RAM 64 in the case of
838 * set of contiguous parameter RAM slots from the "slot" that is passed as an
844 * find a set of contiguous Parameter RAM slots from the remaining Parameter
845 * RAM slots
852 * of slots edma_alloc_cont_slots()
860 * The number of parameter RAM slots requested cannot be less than 1 edma_alloc_cont_slots()
861 * and cannot be more than the number of slots minus the number of edma_alloc_cont_slots()
883 * edma_free_cont_slots - deallocate DMA parameter RAM slots
884 * @slot: first parameter RAM of a set of parameter RAM slots to be freed
885 * @count: the number of contiguous parameter RAM slots to be freed
887 * This deallocates the parameter RAM slots allocated by
890 * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
892 * Callers are responsible for ensuring the slots are inactive, and will
923 /* Parameter RAM operations (i) -- read/write partial slots */
1082 * transfers using the fields in PaRAM slots. If you are not doing it
1728 /* Set the reserved slots in inuse list */ edma_probe()
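The edma.c comments above describe allocating a contiguous run of parameter RAM slots starting from a caller-supplied slot, with reserved slots tracked in an in-use map. A simplified sketch of that search over a plain boolean map; the real driver works on a DECLARE_BITMAP and supports several allocation modes:

#include <stdio.h>
#include <stdbool.h>

#define NUM_SLOTS 128           /* PaRAM slot count is device-specific; illustrative */

static bool slot_inuse[NUM_SLOTS];

/* Claim the first run of 'count' free slots found at or after 'start'. */
static int alloc_cont_slots(int start, int count)
{
        int base, i;

        for (base = start; base + count <= NUM_SLOTS; base++) {
                for (i = 0; i < count; i++)
                        if (slot_inuse[base + i])
                                break;
                if (i == count) {
                        for (i = 0; i < count; i++)
                                slot_inuse[base + i] = true;
                        return base;            /* first slot of the run */
                }
        }
        return -1;                              /* no run of that size left */
}

int main(void)
{
        slot_inuse[66] = true;                  /* pretend slot 66 is already reserved */
        printf("%d\n", alloc_cont_slots(64, 4));        /* prints 67 */
        return 0;
}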
/linux-4.1.27/virt/kvm/
H A Dkvm_main.c446 struct kvm_memslots *slots = kvm->memslots; kvm_init_memslots_id() local
449 slots->id_to_index[i] = slots->memslots[i].id = i; kvm_init_memslots_id()
569 struct kvm_memslots *slots = kvm->memslots; kvm_free_physmem() local
572 kvm_for_each_memslot(memslot, slots) kvm_free_physmem()
664 static void update_memslots(struct kvm_memslots *slots, update_memslots() argument
668 int i = slots->id_to_index[id]; update_memslots()
669 struct kvm_memory_slot *mslots = slots->memslots; update_memslots()
677 slots->used_slots--; update_memslots()
680 slots->used_slots++; update_memslots()
688 slots->id_to_index[mslots[i].id] = i; update_memslots()
705 slots->id_to_index[mslots[i].id] = i; update_memslots()
709 WARN_ON_ONCE(i != slots->used_slots); update_memslots()
712 slots->id_to_index[mslots[i].id] = i; update_memslots()
730 struct kvm_memslots *slots) install_new_memslots()
739 slots->generation = old_memslots->generation + 1; install_new_memslots()
741 rcu_assign_pointer(kvm->memslots, slots); install_new_memslots()
749 slots->generation++; install_new_memslots()
772 struct kvm_memslots *slots = NULL, *old_memslots; __kvm_set_memory_region() local
868 slots = kvm_kvzalloc(sizeof(struct kvm_memslots)); __kvm_set_memory_region()
869 if (!slots) __kvm_set_memory_region()
871 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); __kvm_set_memory_region()
874 slot = id_to_memslot(slots, mem->slot); __kvm_set_memory_region()
877 old_memslots = install_new_memslots(kvm, slots); __kvm_set_memory_region()
895 slots = old_memslots; __kvm_set_memory_region()
908 update_memslots(slots, &new); __kvm_set_memory_region()
909 old_memslots = install_new_memslots(kvm, slots); __kvm_set_memory_region()
917 * IOMMU mapping: New slots need to be mapped. Old slots need to be __kvm_set_memory_region()
921 * slots (size changes, userspace addr changes and read-only flag __kvm_set_memory_region()
933 kvfree(slots); __kvm_set_memory_region()
1631 struct kvm_memslots *slots = kvm_memslots(kvm); kvm_gfn_to_hva_cache_init() local
1639 ghc->generation = slots->generation; kvm_gfn_to_hva_cache_init()
1668 struct kvm_memslots *slots = kvm_memslots(kvm); kvm_write_guest_cached() local
1673 if (slots->generation != ghc->generation) kvm_write_guest_cached()
1694 struct kvm_memslots *slots = kvm_memslots(kvm); kvm_read_guest_cached() local
1699 if (slots->generation != ghc->generation) kvm_read_guest_cached()
729 install_new_memslots(struct kvm *kvm, struct kvm_memslots *slots) install_new_memslots() argument
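The kvm_main.c hits above show the memslots generation pattern: install_new_memslots() publishes a new slots array with a bumped generation, and the cached guest-access paths revalidate by comparing slots->generation against the generation recorded in the cache. A stripped-down sketch of that revalidation idea (generic C, not KVM's actual types, RCU usage, or locking):

struct memslots {
	unsigned long generation;
	/* ... slot array ... */
};

struct gfn_cache {
	unsigned long generation;	/* generation the cached translation was made under */
	unsigned long hva;		/* cached host virtual address */
};

/* Re-translate if the slots array has been republished since the cache
 * entry was created; otherwise the cached address is still usable. */
static unsigned long cached_lookup(struct memslots *slots, struct gfn_cache *ghc,
				   unsigned long gfn,
				   unsigned long (*translate)(struct memslots *, unsigned long))
{
	if (slots->generation != ghc->generation) {
		ghc->hva = translate(slots, gfn);
		ghc->generation = slots->generation;
	}
	return ghc->hva;
}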
/linux-4.1.27/drivers/mfd/
H A Ddln2.c79 * the echo header field to index the slots field and find the receive context
83 /* RX slots bitmap */
90 struct dln2_rx_context slots[DLN2_MAX_RX_SLOTS]; member in struct:dln2_mod_rx_slots
202 rxc = &rxs->slots[rx_slot]; dln2_transfer_complete()
368 struct dln2_rx_context *rxc = &rxs->slots[*slot]; find_free_slot()
409 rxc = &rxs->slots[slot]; free_rx_slot()
464 rxc = &rxs->slots[rx_slot]; _dln2_transfer()
702 struct dln2_rx_context *rxc = &rxs->slots[j]; dln2_stop()
756 init_completion(&dln2->mod_rx_slots[i].slots[j].done); dln2_probe()
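dln2.c above indexes a fixed slots[] array of receive contexts through a free-slot bitmap, using the echo field of a response to find the waiting context. A minimal sketch of claiming and releasing such a slot follows; the slot count and field names are assumptions for illustration, and real driver code would hold a spinlock and wait on the per-slot completion.

#define MAX_RX_SLOTS 16	/* assumed size of the per-module slots[] array */

struct rx_slots {
	unsigned long bmap;		/* bit i set => slot i is free */
	void *ctx[MAX_RX_SLOTS];	/* per-slot receive context */
};

/* Claim a free slot: clear its "free" bit and return its index,
 * or -1 when every slot is busy. */
static int claim_rx_slot(struct rx_slots *rxs)
{
	int i;

	for (i = 0; i < MAX_RX_SLOTS; i++) {
		if (rxs->bmap & (1UL << i)) {
			rxs->bmap &= ~(1UL << i);
			return i;
		}
	}
	return -1;
}

static void release_rx_slot(struct rx_slots *rxs, int slot)
{
	rxs->bmap |= 1UL << slot;
}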
H A Drtsx_pcr.c863 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event) rtsx_pci_card_detect()
864 pcr->slots[RTSX_SD_CARD].card_event( rtsx_pci_card_detect()
865 pcr->slots[RTSX_SD_CARD].p_dev); rtsx_pci_card_detect()
866 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event) rtsx_pci_card_detect()
867 pcr->slots[RTSX_MS_CARD].card_event( rtsx_pci_card_detect()
868 pcr->slots[RTSX_MS_CARD].p_dev); rtsx_pci_card_detect()
1129 pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot), rtsx_pci_init_chip()
1131 if (!pcr->slots) rtsx_pci_init_chip()
1149 kfree(pcr->slots); rtsx_pci_init_chip()
1318 kfree(pcr->slots); rtsx_pci_remove()
/linux-4.1.27/security/selinux/
H A Davc.c55 struct hlist_node list; /* anchored in avc_cache->slots[i] */
60 struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */ member in struct:avc_cache
166 INIT_HLIST_HEAD(&avc_cache.slots[i]); avc_init()
189 head = &avc_cache.slots[i]; avc_get_hash_stats()
246 head = &avc_cache.slots[hvalue]; avc_reclaim_node()
303 head = &avc_cache.slots[hvalue]; hlist_for_each_entry_rcu()
398 head = &avc_cache.slots[hvalue]; avc_insert()
550 head = &avc_cache.slots[hvalue]; avc_update_node()
617 head = &avc_cache.slots[i]; avc_flush()
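In avc.c the cache is an array of hlist heads, and a computed hash picks which slots[] bucket a node is chained into. A tiny sketch of that bucket selection (a generic chained hash table, not the SELinux AVC itself; the slot count is assumed and kept a power of two so a mask can replace a modulo):

#include <stddef.h>

#define CACHE_SLOTS 512	/* assumed; must stay a power of two for the mask */

struct cache_node {
	unsigned int key;
	struct cache_node *next;	/* stand-in for the kernel hlist linkage */
};

struct cache {
	struct cache_node *slots[CACHE_SLOTS];
};

/* Hash the key and walk the chain anchored in the matching slot. */
static struct cache_node *cache_lookup(struct cache *c, unsigned int key)
{
	unsigned int hvalue = key & (CACHE_SLOTS - 1);
	struct cache_node *node;

	for (node = c->slots[hvalue]; node; node = node->next)
		if (node->key == key)
			return node;
	return NULL;
}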
/linux-4.1.27/drivers/net/xen-netback/
H A Dnetback.c65 * because it isn't providing Rx slots.
76 * This is the maximum slots a skb can have. If a guest sends a skb
361 * ring slots used is always equal to the number of meta slots used
685 int slots = 0; xenvif_count_requests() local
695 if (slots >= work_to_do) { xenvif_count_requests()
697 "Asked for %d slots but exceeds this limit\n", xenvif_count_requests()
703 /* This guest is really using too many slots and xenvif_count_requests()
706 if (unlikely(slots >= fatal_skb_slots)) { xenvif_count_requests()
708 "Malicious frontend using %d slots, threshold %u\n", xenvif_count_requests()
709 slots, fatal_skb_slots); xenvif_count_requests()
718 * 18 slots but less than fatal_skb_slots slots is xenvif_count_requests()
721 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) { xenvif_count_requests()
724 "Too many slots (%d) exceeding limit (%d), dropping packet\n", xenvif_count_requests()
725 slots, XEN_NETBK_LEGACY_SLOTS_MAX); xenvif_count_requests()
732 memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), xenvif_count_requests()
736 * first->size overflowed and following slots will xenvif_count_requests()
742 * Consume all slots and drop the packet. xenvif_count_requests()
753 slots++; xenvif_count_requests()
770 xenvif_tx_err(queue, first, cons + slots); xenvif_count_requests()
774 return slots; xenvif_count_requests()
828 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX. xenvif_get_requests()
1943 /* If the guest hasn't provided any Rx slots for a xenvif_kthread_guest_rx()
1957 * slots. xenvif_kthread_guest_rx()
H A Dcommon.h97 * the maximum slots a valid packet can use. Now this value is defined
203 /* Maximum number of Rx slots a to-guest packet may use, including the
309 /* Determine whether the needed number of slots (req) are available,
/linux-4.1.27/drivers/macintosh/
H A Dwindfarm_pm112.c440 /* PCI slots area fan */
470 printk(KERN_WARNING "windfarm: slots power sensor error %d\n", slots_fan_tick()
477 DBG_LOTS("slots PID power=%d.%.3d speed=%d\n", slots_fan_tick()
482 printk(KERN_WARNING "windfarm: slots fan error %d\n", err); slots_fan_tick()
573 } else if (!strcmp(ct->name, "slots-fan")) { pm112_new_control()
615 } else if (!strcmp(sr->name, "slots-power")) { pm112_new_sensor()
H A Dwindfarm_rm31.c465 DBG_LOTS("* slots fans tick\n"); slots_fan_tick()
469 pr_warning("wf_rm31: slots temp sensor error %d\n", err); slots_fan_tick()
476 DBG_LOTS("slots PID temp=%d.%.3d speed=%d\n", slots_fan_tick()
482 printk(KERN_WARNING "windfarm: slots bay fan error %d\n", err); slots_fan_tick()
586 else if (!strcmp(ct->name, "slots-fan")) rm31_new_control()
624 else if (!strcmp(sr->name, "slots-temp")) rm31_new_sensor()
H A Dwindfarm_smu_sensors.c161 printk(KERN_ERR "windfarm: read slots power failed, err %d\n", smu_slotspow_get()
248 ads->sens.name = "slots-power"; smu_ads_create()
399 /* Get slots power calibration data if any */ smu_fetch_param_partitions()
/linux-4.1.27/sound/soc/codecs/
H A Dssm2518.c530 unsigned int rx_mask, int slots, int width) ssm2518_set_tdm_slot()
537 if (slots == 0) ssm2518_set_tdm_slot()
545 if (slots == 1) { ssm2518_set_tdm_slot()
562 if (tx_mask != 0 || left_slot >= slots || right_slot >= slots) ssm2518_set_tdm_slot()
579 switch (slots) { ssm2518_set_tdm_slot()
529 ssm2518_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int width) ssm2518_set_tdm_slot() argument
H A Dmc13783.c308 unsigned int tx_mask, unsigned int rx_mask, int slots, mc13783_set_tdm_slot_dac()
316 switch (slots) { mc13783_set_tdm_slot_dac()
353 unsigned int tx_mask, unsigned int rx_mask, int slots, mc13783_set_tdm_slot_codec()
360 if (slots != 4) mc13783_set_tdm_slot_codec()
375 unsigned int tx_mask, unsigned int rx_mask, int slots, mc13783_set_tdm_slot_sync()
380 ret = mc13783_set_tdm_slot_dac(dai, tx_mask, rx_mask, slots, mc13783_set_tdm_slot_sync()
385 ret = mc13783_set_tdm_slot_codec(dai, tx_mask, rx_mask, slots, mc13783_set_tdm_slot_sync()
307 mc13783_set_tdm_slot_dac(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) mc13783_set_tdm_slot_dac() argument
352 mc13783_set_tdm_slot_codec(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) mc13783_set_tdm_slot_codec() argument
374 mc13783_set_tdm_slot_sync(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) mc13783_set_tdm_slot_sync() argument
H A Dadau17x1.c150 * source/destination or one of the TDM slots. The TDM slot is selected via
548 unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) adau17x1_set_dai_tdm_slot()
555 if (slots == 0) { adau17x1_set_dai_tdm_slot()
556 slots = 2; adau17x1_set_dai_tdm_slot()
562 switch (slots) { adau17x1_set_dai_tdm_slot()
579 switch (slot_width * slots) { adau17x1_set_dai_tdm_slot()
547 adau17x1_set_dai_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) adau17x1_set_dai_tdm_slot() argument
H A Dadau1977.c505 unsigned int rx_mask, int slots, int width) adau1977_set_tdm_slot()
513 if (slots == 0) { adau1977_set_tdm_slot()
530 if (slot[i] >= slots) adau1977_set_tdm_slot()
544 /* We can only generate 16 bit or 32 bit wide slots */ adau1977_set_tdm_slot()
556 switch (slots) { adau1977_set_tdm_slot()
606 adau1977->max_master_fs = min(192000, 24576000 / width / slots); adau1977_set_tdm_slot()
504 adau1977_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int width) adau1977_set_tdm_slot() argument
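The last adau1977 line above caps the maximum master sample rate at min(192000, 24576000 / width / slots): the bit-clock budget (24.576 MHz in that line) divided by the bits per frame. A small worked sketch of that arithmetic, with the helper name being mine rather than the driver's:

#include <stdio.h>

/* Highest frame rate that fits in the bit clock: bclk / (slot_width * slots),
 * clamped to a 192 kHz ceiling as in the line quoted above. */
static unsigned int max_master_fs(unsigned int bclk, int width, int slots)
{
	unsigned int fs = bclk / (unsigned int)(width * slots);

	return fs < 192000 ? fs : 192000;
}

int main(void)
{
	/* 8 slots of 32 bits on a 24.576 MHz bit clock -> 96000 Hz */
	printf("%u\n", max_master_fs(24576000, 32, 8));
	return 0;
}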
/linux-4.1.27/drivers/usb/host/whci/
H A Dwusb.c127 int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots) whc_set_num_dnts() argument
134 | WUSBDNTSCTRL_SLOTS(slots); whc_set_num_dnts()
/linux-4.1.27/arch/sh/boards/mach-sdk7786/
H A Dsetup.c156 * Historically these include the oscillator, clock B (slots 2/3/4) and
163 * off through the FPGA along with the PCI slots, we simply leave them in
/linux-4.1.27/arch/powerpc/lib/
H A Drheap.c99 * Assure at least the required number of empty slots. If this function
103 static int assure_empty(rh_info_t * info, int slots) assure_empty() argument
108 if (slots >= 4) assure_empty()
112 if (info->empty_slots >= slots) assure_empty()
116 max_blocks = ((info->max_blocks + slots) + 15) & ~15; assure_empty()
125 /* If no more free slots, and failure to extend. */ get_slot()
128 printk(KERN_ERR "rh: out of slots; crash is imminent.\n"); get_slot()
713 "info @0x%p (%d slots empty / %d max)\n", rh_dump()
/linux-4.1.27/fs/f2fs/
H A Ddir.c453 int room_for_filename(const void *bitmap, int slots, int max_slots) room_for_filename() argument
463 if (zero_end - zero_start >= slots) room_for_filename()
478 int slots = GET_DENTRY_SLOTS(name->len); f2fs_update_dentry() local
487 for (i = 0; i < slots; i++) f2fs_update_dentry()
508 int slots = GET_DENTRY_SLOTS(namelen); __f2fs_add_link() local
549 slots, NR_DENTRY_IN_BLOCK); __f2fs_add_link()
657 int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len)); f2fs_delete_entry() local
668 for (i = 0; i < slots; i++) f2fs_delete_entry()
H A Dinline.c402 int slots = GET_DENTRY_SLOTS(namelen); f2fs_add_inline_entry() local
412 slots, NR_INLINE_DENTRY); f2fs_add_inline_entry()
462 int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len)); f2fs_delete_inline_entry() local
471 for (i = 0; i < slots; i++) f2fs_delete_inline_entry()
/linux-4.1.27/include/net/irda/
H A Daf_irda.h71 int nslots; /* Number of slots to use for discovery */
H A Ddiscovery.h83 int nslots; /* Number of slots to use when
/linux-4.1.27/arch/powerpc/platforms/embedded6xx/
H A Dstorcenter.c80 * from the four PCI slots plus onboard 8241 devices: I2C, DUART.
H A Dlinkstation.c79 * from the four PCI slots plus onboard 8241 devices: I2C, DUART.
/linux-4.1.27/arch/mips/include/asm/sgi/
H A Dgio.h18 * three physical connectors, but only two slots, GFX and EXP0.
/linux-4.1.27/arch/powerpc/boot/
H A Dugecon.c137 /* look for a usbgecko on memcard slots A and B */ ug_probe()
/linux-4.1.27/arch/m68k/mac/
H A Dbaboon.c94 * same workaround that's used for NuBus slots (see nubus_disabled and
/linux-4.1.27/arch/alpha/kernel/
H A Dsys_cabriolet.c168 * that's printed on the board. The interrupt pins from the PCI slots
193 * The AlphaPC64 is very similar to the EB66+ except that its slots
198 * printed on the board. The interrupt pins from the PCI slots are
250 * PCI slots, the SIO, PCI/IDE, and USB.
/linux-4.1.27/drivers/media/pci/ttpci/
H A Dav7110_ca.c125 int slots, ca_slot_info_t *slot) ci_ll_reset()
132 if (slots & (1 << i)) ci_ll_reset()
140 if (slots & (1 << i)) { ci_ll_reset()
124 ci_ll_reset(struct dvb_ringbuffer *cibuf, struct file *file, int slots, ca_slot_info_t *slot) ci_ll_reset() argument
/linux-4.1.27/drivers/staging/lustre/lustre/osc/
H A Dosc_page.c573 * occupy more LRU slots. On the other hand, we should avoid using up all LRU
574 * slots (client_obd::cl_lru_left) otherwise process has to be put into sleep
575 * for free LRU slots - this will be very bad so the algorithm requires each
576 * OSC to free slots voluntarily to maintain a reasonable number of free slots
588 /* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
589 * we should free slots aggressively. In this way, slots are freed in a steady
603 /* if it's going to run out of LRU slots, we should free some, but not osc_cache_too_much()
830 CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n", osc_lru_reclaim()
835 /* Reclaim LRU slots from other client_obd as it can't free enough osc_lru_reclaim()
892 * other OSCs that we're short of LRU slots. */ osc_lru_reserve()
/linux-4.1.27/drivers/net/wireless/b43/
H A Ddma.h239 /* Number of descriptor slots in the ring. */
241 /* Number of used descriptor slots. */
264 /* Maximum number of used slots. */
/linux-4.1.27/drivers/dma/
H A Dmv_xor.h92 * @completed_slots: slots completed by HW but still need to be acked
96 * @all_slots: complete domain of slots usable by the channel
133 * @tx_list: list of slots that make up a multi-descriptor transaction
/linux-4.1.27/arch/powerpc/include/asm/
H A Dptrace.h220 #define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
221 #define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
224 #define PT_VSR0_32 300 /* each VSR reg occupies 4 slots in 32-bit */
/linux-4.1.27/arch/powerpc/include/uapi/asm/
H A Dptrace.h114 #define PT_FPR0 48 /* each FP reg occupies 2 slots in this space */
126 #define PT_VR0 82 /* each Vector reg occupies 2 slots in 64-bit */
134 #define PT_VSR0 150 /* each VSR reg occupies 2 slots in 64-bit */
/linux-4.1.27/drivers/scsi/
H A D53c700.c310 hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET); NCR_700_detect()
338 memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot) NCR_700_detect()
341 dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0] NCR_700_detect()
342 - (unsigned long)&hostdata->slots[0].SG[0]); NCR_700_detect()
343 hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset)); NCR_700_detect()
345 hostdata->free_list = &hostdata->slots[j]; NCR_700_detect()
347 hostdata->slots[j-1].ITL_forw = &hostdata->slots[j]; NCR_700_detect()
348 hostdata->slots[j].state = NCR_700_SLOT_FREE; NCR_700_detect()
1197 if(SG >= to32bit(&hostdata->slots[i].pSG[0]) process_script_interrupt()
1198 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS])) process_script_interrupt()
1201 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset); process_script_interrupt()
1202 SCp = hostdata->slots[i].cmnd; process_script_interrupt()
1546 /* clear all the slots and their pending commands */ __shost_for_each_device()
1550 &hostdata->slots[i]; __shost_for_each_device()
1731 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1733 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1735 host->host_no, &hostdata->slots[j],
1736 hostdata->slots[j].cmnd));
/linux-4.1.27/drivers/gpu/drm/i915/
H A Dintel_dp_mst.c41 int lane_count, slots, rate; intel_dp_mst_compute_config() local
91 slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn); intel_dp_mst_compute_config()
98 pipe_config->dp_m_n.tu = slots; intel_dp_mst_compute_config()
154 int slots; intel_mst_pre_enable_dp() local
191 intel_crtc->config->pbn, &slots);
/linux-4.1.27/fs/squashfs/
H A Dfile.c37 * is split into slots, caching up to eight 224 GiB files (128 KiB blocks).
38 * Larger files use multiple slots, with 1.75 TiB files using all 8 slots.
221 * slots.
280 * which case further slots will be used. fill_meta_index()
/linux-4.1.27/include/drm/
H A Ddrm_dp_mst_helper.h35 * @num_slots: number of slots for this PBN
103 * @tx_slots: transmission slots for this device.
126 /* slots are protected by mstb->mgr->qlock */
410 * @pbn_div: PBN to slots divisor.
485 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
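drm_dp_mst_helper.h above defines pbn_div as the "PBN to slots divisor", i.e. the payload bandwidth a single MST time slot can carry, so the number of slots a stream needs is its PBN divided by that value, rounded up. A small illustrative helper (not the DRM helper's code; the divisor depends on link rate and lane count):

/* Round up: a stream needing any fraction of a slot occupies the whole slot.
 * e.g. pbn_to_slots(2560, 60) == 43. */
static int pbn_to_slots(int pbn, int pbn_div)
{
	return (pbn + pbn_div - 1) / pbn_div;
}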
/linux-4.1.27/drivers/net/
H A Dxen-netfront.c81 /* Minimum number of Rx slots (includes slot for GSO metadata). */
180 * Access macros for acquiring freeing slots in tx_skbs[].
472 * Count how many ring slots are required to send this skb. Each frag
521 int slots; xennet_start_xmit() local
547 slots = xennet_count_skb_slots(skb); xennet_start_xmit()
548 if (unlikely(slots > MAX_SKB_FRAGS + 1)) { xennet_start_xmit()
549 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n", xennet_start_xmit()
550 slots, skb->len); xennet_start_xmit()
562 (slots > 1 && !xennet_can_sg(dev)) || xennet_start_xmit()
723 int slots = 1; xennet_get_responses() local
767 if (cons + slots == rp) { xennet_get_responses()
769 dev_warn(dev, "Need more slots\n"); xennet_get_responses()
774 rx = RING_GET_RESPONSE(&queue->rx, cons + slots); xennet_get_responses()
775 skb = xennet_get_rx_skb(queue, cons + slots); xennet_get_responses()
776 ref = xennet_get_rx_ref(queue, cons + slots); xennet_get_responses()
777 slots++; xennet_get_responses()
780 if (unlikely(slots > max)) { xennet_get_responses()
782 dev_warn(dev, "Too many slots\n"); xennet_get_responses()
787 queue->rx.rsp_cons = cons + slots; xennet_get_responses()
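xennet_count_skb_slots(), referenced above, has to count how many ring slots an skb will occupy before deciding whether it fits (the "skb rides the rocket" drop path). A rough, hedged sketch of that kind of count: every page the payload touches costs one slot, including a partial first page. This follows the surrounding comments rather than copying the driver's exact code, and the page size is assumed.

#define PAGE_SZ 4096u	/* assumed page size for the illustration */

/* Slots needed for a buffer of 'len' bytes starting at 'offset' within
 * its first page: one slot per page spanned. */
static unsigned int count_slots(unsigned int offset, unsigned int len)
{
	return (offset + len + PAGE_SZ - 1) / PAGE_SZ;
}

/* A 100-byte chunk at offset 4000 straddles a page boundary and
 * therefore needs two slots: count_slots(4000, 100) == 2. */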
/linux-4.1.27/drivers/net/ethernet/broadcom/
H A Dbgmac.c131 slot = &ring->slots[i]; bgmac_dma_tx_add_buf()
146 struct bgmac_slot_info *slot = &ring->slots[index]; bgmac_dma_tx_add()
161 /* ring->end - ring->start will return the number of valid slots, bgmac_dma_tx_add()
187 slot = &ring->slots[index]; bgmac_dma_tx_add()
224 struct bgmac_slot_info *slot = &ring->slots[index]; bgmac_dma_tx_add()
257 struct bgmac_slot_info *slot = &ring->slots[slot_idx]; bgmac_dma_tx_free()
393 dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr)); bgmac_dma_rx_setup_desc()
394 dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr)); bgmac_dma_rx_setup_desc()
428 struct bgmac_slot_info *slot = &ring->slots[ring->start]; bgmac_dma_rx_read()
527 slot = &ring->slots[i]; bgmac_dma_tx_ring_free()
550 slot = &ring->slots[i]; bgmac_dma_rx_ring_free()
642 /* No need to alloc TX slots yet */ bgmac_dma_alloc()
714 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]); bgmac_dma_init()
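bgmac.c above notes that "ring->end - ring->start will return the number of valid slots": the in-flight descriptors sit between the consumer and producer indices, with the arithmetic wrapping modulo the ring size. A small sketch of that occupancy arithmetic (illustrative only, with an assumed ring size and a one-slot-spare convention chosen for this sketch):

#define TX_RING_SLOTS 128u	/* assumed ring size; a power of two so the mask works */

/* Descriptors currently in flight between consumer index 'start'
 * and producer index 'end', with wrap-around. */
static unsigned int used_slots(unsigned int start, unsigned int end)
{
	return (end - start) & (TX_RING_SLOTS - 1);
}

static unsigned int free_slots(unsigned int start, unsigned int end)
{
	/* one slot is kept spare in this sketch so a full ring is unambiguous */
	return TX_RING_SLOTS - 1 - used_slots(start, end);
}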
/linux-4.1.27/sound/pci/
H A Datiixp.c183 #define ATI_REG_6CH_REORDER 0x84 /* reorder slots for 6ch */
220 enum { ATI_PCM_OUT, ATI_PCM_IN, ATI_PCM_SPDIF, NUM_ATI_PCMS }; /* AC97 pcm slots */
867 /* set up slots and formats for SPDIF OUT */ snd_atiixp_spdif_prepare()
875 /* enable slots 10/11 */ snd_atiixp_spdif_prepare()
894 /* set up slots and formats for analog OUT */ snd_atiixp_playback_prepare()
939 /* set up slots and formats for analog IN */ snd_atiixp_capture_prepare()
985 pcm->r[0].slots); snd_atiixp_pcm_hw_params()
1191 .slots = (1 << AC97_SLOT_PCM_LEFT) |
1205 .slots = (1 << AC97_SLOT_PCM_LEFT) |
1215 .slots = (1 << AC97_SLOT_SPDIF_LEFT2) |
1275 if (pbus->pcms[ATI_PCM_OUT].r[0].slots & (1 << AC97_SLOT_PCM_SLEFT)) { snd_atiixp_pcm_new()
1276 if (pbus->pcms[ATI_PCM_OUT].r[0].slots & (1 << AC97_SLOT_LFE)) snd_atiixp_pcm_new()
1330 /* pre-select AC97 SPDIF slots 10/11 */ snd_atiixp_pcm_new()
/linux-4.1.27/sound/soc/davinci/
H A Ddavinci-mcasp.c622 * tdm-slots (for I2S - divided by 2). davinci_config_channel_size()
663 u8 slots = mcasp->tdm_slots; mcasp_common_hw_param() local
664 u8 max_active_serializers = (channels + slots - 1) / slots; mcasp_common_hw_param()
712 active_serializers * slots); mcasp_common_hw_param()
778 * can cope with the transaction using as many slots as channels mcasp_i2s_hw_param()
822 /* Set the TX tdm : for all the slots */ mcasp_dit_hw_param()
1143 * number of serializers for the direction * tdm slots per serializer davinci_mcasp_startup()
1477 ret = of_property_read_u32(np, "tdm-slots", &val); davinci_mcasp_set_pdata_from_of()
1481 "tdm-slots must be in rage [2-32]\n"); davinci_mcasp_set_pdata_from_of()
1614 /* sanity check for tdm slots parameter */ davinci_mcasp_probe()
1617 dev_err(&pdev->dev, "invalid tdm slots: %d\n", davinci_mcasp_probe()
1621 dev_err(&pdev->dev, "invalid tdm slots: %d\n", davinci_mcasp_probe()
/linux-4.1.27/sound/soc/pxa/
H A Dpxa-ssp.c381 * Set the active slots in TDM/Network mode
384 unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) pxa_ssp_set_dai_tdm_slot()
399 if (slots > 1) { pxa_ssp_set_dai_tdm_slot()
403 /* set number of active slots */ pxa_ssp_set_dai_tdm_slot()
404 sscr0 |= SSCR0_SlotsPerFrm(slots); pxa_ssp_set_dai_tdm_slot()
626 /* When we use a network mode, we always require TDM slots pxa_ssp_hw_params()
383 pxa_ssp_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) pxa_ssp_set_dai_tdm_slot() argument
/linux-4.1.27/drivers/ipack/
H A Dipack.c208 struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, ipack_bus_register() argument
227 bus->slots = slots; ipack_bus_register()
/linux-4.1.27/drivers/net/ethernet/ibm/emac/
H A Dcore.h376 * address match slots, 2) width of the registers for handling address
377 * match slots, 3) number of registers for handling address match
378 * slots and 4) base offset for those registers.
/linux-4.1.27/drivers/block/aoe/
H A Daoedev.c31 * - slots per shelf,
68 * a static number of supported slots per shelf */ minor_get_static()
73 pr_err("aoe: %s %d slots per shelf\n", minor_get_static()
/linux-4.1.27/drivers/uwb/
H A Ddrp-avail.c47 * slots are used for the BP (it may change in size).
261 * So we clear available slots, we set used slots :)
/linux-4.1.27/include/xen/interface/io/
H A Dnetif.h18 * ring slots a skb can use. Netfront / netback may not work as
24 * slots backend must support.
29 * send a valid packet using slots up to this value.
/linux-4.1.27/fs/btrfs/tests/
H A Dqgroup-tests.c67 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); insert_normal_tree_ref()
116 item = btrfs_item_ptr(path->nodes[0], path->slots[0], add_tree_ref()
200 item = btrfs_item_ptr(path->nodes[0], path->slots[0], remove_extent_ref()
/linux-4.1.27/include/linux/mmc/
H A Ddw_mmc.h44 * struct dw_mci - MMC controller state shared between all slots
77 * @queue: List of slots waiting for access to the controller.
81 * @num_slots: Number of slots available.
/linux-4.1.27/drivers/memstick/host/
H A Drtsx_pci_ms.c570 pcr->slots[RTSX_MS_CARD].p_dev = pdev; rtsx_pci_ms_drv_probe()
571 pcr->slots[RTSX_MS_CARD].card_event = rtsx_pci_ms_card_event; rtsx_pci_ms_drv_probe()
600 pcr->slots[RTSX_MS_CARD].p_dev = NULL; rtsx_pci_ms_drv_remove()
601 pcr->slots[RTSX_MS_CARD].card_event = NULL; rtsx_pci_ms_drv_remove()
/linux-4.1.27/drivers/net/fddi/skfp/h/
H A Dtargethw.h75 short max_slots ; /* maximum number of slots */
/linux-4.1.27/drivers/net/wireless/iwlwifi/mvm/
H A Dfw-api-mac.h296 * @cw_min: Contention window, start value in numbers of slots.
298 * @cw_max: Contention window, max value in numbers of slots.
300 * @aifsn: Number of slots in Arbitration Interframe Space (before
333 * @short_slot: 0x10 for enabling short slots, 0 otherwise
/linux-4.1.27/fs/ocfs2/
H A Dheartbeat.c74 * participate yet. We check the slots after the cluster ocfs2_do_node_down()
/linux-4.1.27/include/linux/sunrpc/
H A Dxprt.h177 struct list_head free; /* free slots */
178 unsigned int max_reqs; /* max number of slots */
179 unsigned int min_reqs; /* min number of slots */
180 atomic_t num_reqs; /* total slots */
/linux-4.1.27/include/uapi/linux/
H A Dzorro.h105 __be16 cd_SlotSize; /* number of slots (PRIVATE) */
/linux-4.1.27/arch/mips/sgi-ip22/
H A Dip22-eisa.c41 /* I2 has four EISA slots. */
/linux-4.1.27/arch/parisc/include/asm/
H A Deisa_eeprom.h29 u_int8_t num_slots; /* number of EISA slots in system */
/linux-4.1.27/arch/ia64/include/asm/sn/
H A Dgeo.h21 #define MAX_SLOTS 0xf /* slots per module */
/linux-4.1.27/arch/m68k/include/uapi/asm/
H A Dbootinfo-mac.h43 #define BI_MAC_SLOTMASK 0x801b /* Mac Nubus slots present */
/linux-4.1.27/arch/alpha/include/asm/
H A Dmachvec.h30 two slots are at the beginning of the struct. */
/linux-4.1.27/arch/alpha/lib/
H A Dev67-strchr.S83 cmoveq t1, $31, v0 # E : Two mapping slots, latency = 2
