/linux-4.1.27/fs/nfs/ |
D | nfs4session.c | 26 static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue) in nfs4_init_slot_table() argument 28 tbl->highest_used_slotid = NFS4_NO_SLOT; in nfs4_init_slot_table() 29 spin_lock_init(&tbl->slot_tbl_lock); in nfs4_init_slot_table() 30 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue); in nfs4_init_slot_table() 31 init_completion(&tbl->complete); in nfs4_init_slot_table() 37 static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize) in nfs4_shrink_slot_table() argument 40 if (newsize >= tbl->max_slots) in nfs4_shrink_slot_table() 43 p = &tbl->slots; in nfs4_shrink_slot_table() 51 tbl->max_slots--; in nfs4_shrink_slot_table() 60 void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl) in nfs4_slot_tbl_drain_complete() argument [all …]
|
D | nfs4session.h | 76 extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, 78 extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl); 79 extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl); 80 extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot); 81 extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl); 82 bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl, 84 void nfs41_wake_slot_table(struct nfs4_slot_table *tbl); 86 static inline bool nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl) in nfs4_slot_tbl_draining() argument 88 return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state); in nfs4_slot_tbl_draining() 92 extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl, [all …]
|
D | callback_proc.c | 316 validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args) in validate_seqid() argument 326 slot = tbl->slots + args->csa_slotid; in validate_seqid() 357 tbl->highest_used_slotid = args->csa_slotid; in validate_seqid() 373 struct nfs4_slot_table *tbl; in referring_call_exists() local 382 tbl = &session->fc_slot_table; in referring_call_exists() 402 spin_lock(&tbl->slot_tbl_lock); in referring_call_exists() 403 status = (test_bit(ref->rc_slotid, tbl->used_slots) && in referring_call_exists() 404 tbl->slots[ref->rc_slotid].seq_nr == in referring_call_exists() 406 spin_unlock(&tbl->slot_tbl_lock); in referring_call_exists() 420 struct nfs4_slot_table *tbl; in nfs4_callback_sequence() local [all …]
|
D | nfs4client.c | 275 struct nfs4_slot_table *tbl; in nfs40_init_client() local 278 tbl = kzalloc(sizeof(*tbl), GFP_NOFS); in nfs40_init_client() 279 if (tbl == NULL) in nfs40_init_client() 282 ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE, in nfs40_init_client() 285 kfree(tbl); in nfs40_init_client() 289 clp->cl_slot_tbl = tbl; in nfs40_init_client()
|
D | nfs4state.c | 218 static void nfs4_end_drain_slot_table(struct nfs4_slot_table *tbl) in nfs4_end_drain_slot_table() argument 220 if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) { in nfs4_end_drain_slot_table() 221 spin_lock(&tbl->slot_tbl_lock); in nfs4_end_drain_slot_table() 222 nfs41_wake_slot_table(tbl); in nfs4_end_drain_slot_table() 223 spin_unlock(&tbl->slot_tbl_lock); in nfs4_end_drain_slot_table() 242 static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl) in nfs4_drain_slot_tbl() argument 244 set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state); in nfs4_drain_slot_tbl() 245 spin_lock(&tbl->slot_tbl_lock); in nfs4_drain_slot_tbl() 246 if (tbl->highest_used_slotid != NFS4_NO_SLOT) { in nfs4_drain_slot_tbl() 247 reinit_completion(&tbl->complete); in nfs4_drain_slot_tbl() [all …]
|
D | callback_xdr.c | 760 struct nfs4_slot_table *tbl = &session->bc_slot_table; in nfs4_callback_free_slot() local 762 spin_lock(&tbl->slot_tbl_lock); in nfs4_callback_free_slot() 767 tbl->highest_used_slotid = NFS4_NO_SLOT; in nfs4_callback_free_slot() 768 nfs4_slot_tbl_drain_complete(tbl); in nfs4_callback_free_slot() 769 spin_unlock(&tbl->slot_tbl_lock); in nfs4_callback_free_slot()
|
D | nfs4proc.c | 500 int nfs40_setup_sequence(struct nfs4_slot_table *tbl, in nfs40_setup_sequence() argument 511 spin_lock(&tbl->slot_tbl_lock); in nfs40_setup_sequence() 512 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) in nfs40_setup_sequence() 515 slot = nfs4_alloc_slot(tbl); in nfs40_setup_sequence() 521 spin_unlock(&tbl->slot_tbl_lock); in nfs40_setup_sequence() 532 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, in nfs40_setup_sequence() 535 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); in nfs40_setup_sequence() 536 spin_unlock(&tbl->slot_tbl_lock); in nfs40_setup_sequence() 545 struct nfs4_slot_table *tbl; in nfs40_sequence_done() local 550 tbl = slot->table; in nfs40_sequence_done() [all …]
|
D | nfs4_fs.h | 449 extern int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | iommu.c | 177 struct iommu_table *tbl, in iommu_range_alloc() argument 211 pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc() 214 pool = &(tbl->large_pool); in iommu_range_alloc() 216 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc() 236 if (limit + tbl->it_offset > mask) { in iommu_range_alloc() 237 limit = mask - tbl->it_offset + 1; in iommu_range_alloc() 244 pool = &(tbl->pools[0]); in iommu_range_alloc() 254 1 << tbl->it_page_shift); in iommu_range_alloc() 256 boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift); in iommu_range_alloc() 259 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc() [all …]
|
D | dma-iommu.c | 78 struct iommu_table *tbl = get_iommu_table_base(dev); in dma_iommu_dma_supported() local 80 if (!tbl) { in dma_iommu_dma_supported() 86 if (tbl->it_offset > (mask >> tbl->it_page_shift)) { in dma_iommu_dma_supported() 89 mask, tbl->it_offset << tbl->it_page_shift); in dma_iommu_dma_supported() 97 struct iommu_table *tbl = get_iommu_table_base(dev); in dma_iommu_get_required_mask() local 99 if (!tbl) in dma_iommu_get_required_mask() 102 mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1); in dma_iommu_get_required_mask()
|
D | vio.c | 521 struct iommu_table *tbl; in vio_dma_iommu_map_page() local 524 tbl = get_iommu_table_base(dev); in vio_dma_iommu_map_page() 525 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) { in vio_dma_iommu_map_page() 532 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); in vio_dma_iommu_map_page() 545 struct iommu_table *tbl; in vio_dma_iommu_unmap_page() local 547 tbl = get_iommu_table_base(dev); in vio_dma_iommu_unmap_page() 550 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); in vio_dma_iommu_unmap_page() 558 struct iommu_table *tbl; in vio_dma_iommu_map_sg() local 563 tbl = get_iommu_table_base(dev); in vio_dma_iommu_map_sg() 565 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl)); in vio_dma_iommu_map_sg() [all …]
|
D | eeh.c | 1430 struct iommu_table *tbl; in dev_has_iommu_table() local 1435 tbl = get_iommu_table_base(dev); in dev_has_iommu_table() 1436 if (tbl && tbl->it_group) { in dev_has_iommu_table()
|
D | asm-offsets.c | 745 arch.timing_exit.tv32.tbl)); in main() 749 arch.timing_last_enter.tv32.tbl)); in main()
|
/linux-4.1.27/include/linux/ |
D | rhashtable.h | 144 struct bucket_table __rcu *tbl; member 161 struct bucket_table *tbl; member 204 static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, in rht_bucket_index() argument 207 return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); in rht_bucket_index() 211 struct rhashtable *ht, const struct bucket_table *tbl, in rht_key_hashfn() argument 218 hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); in rht_key_hashfn() 223 hash = params.hashfn(key, key_len, tbl->hash_rnd); in rht_key_hashfn() 225 hash = jhash(key, key_len, tbl->hash_rnd); in rht_key_hashfn() 228 tbl->hash_rnd); in rht_key_hashfn() 233 hash = params.hashfn(key, key_len, tbl->hash_rnd); in rht_key_hashfn() [all …]
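
The rht_bucket_index() fragment above reduces a hash to a bucket slot by shifting off the reserved low bits and masking with (size - 1), which only works because the bucket count is a power of two. A minimal standalone sketch of that indexing step, with an illustrative reserved-bit count and struct name rather than the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    #define HASH_RESERVED_BITS 4    /* illustrative: low bits set aside, as in the shift above */

    struct bucket_table_sketch {
        unsigned int size;          /* number of buckets, always a power of two */
    };

    /* Map a 32-bit hash to a bucket index: drop the reserved low bits, then
     * mask with (size - 1); valid only because size is a power of two. */
    static unsigned int bucket_index(const struct bucket_table_sketch *tbl, uint32_t hash)
    {
        return (hash >> HASH_RESERVED_BITS) & (tbl->size - 1);
    }

    int main(void)
    {
        struct bucket_table_sketch tbl = { .size = 64 };
        printf("%u\n", bucket_index(&tbl, 0xdeadbeef));  /* always < 64 */
        return 0;
    }
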
|
/linux-4.1.27/lib/ |
D | rhashtable.c | 36 const struct bucket_table *tbl, in head_hashfn() argument 39 return rht_head_hashfn(ht, tbl, he, ht->p); in head_hashfn() 51 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) in lockdep_rht_bucket_is_held() argument 53 spinlock_t *lock = rht_bucket_lock(tbl, hash); in lockdep_rht_bucket_is_held() 63 static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, in alloc_bucket_locks() argument 77 size = min_t(unsigned int, size, tbl->size >> 1); in alloc_bucket_locks() 83 tbl->locks = vmalloc(size * sizeof(spinlock_t)); in alloc_bucket_locks() 86 tbl->locks = kmalloc_array(size, sizeof(spinlock_t), in alloc_bucket_locks() 88 if (!tbl->locks) in alloc_bucket_locks() 91 spin_lock_init(&tbl->locks[i]); in alloc_bucket_locks() [all …]
|
D | test_rhashtable.c | 85 struct bucket_table *tbl; in test_bucket_stats() local 87 tbl = rht_dereference_rcu(ht->tbl, ht); in test_bucket_stats() 88 for (i = 0; i < tbl->size; i++) { in test_bucket_stats() 92 pr_info(" [%#4x/%u]", i, tbl->size); in test_bucket_stats() 94 rht_for_each_entry_rcu(obj, pos, tbl, i, node) { in test_bucket_stats() 101 rht_for_each_entry_rcu(obj, pos, tbl, i, node) in test_bucket_stats() 110 i, tbl->buckets[i], cnt); in test_bucket_stats() 122 struct bucket_table *tbl; in test_rhashtable() local 175 tbl = rht_dereference_rcu(ht->tbl, ht); in test_rhashtable() 176 for (i = 0; i < tbl->size; i++) in test_rhashtable() [all …]
|
D | devres.c | 293 void __iomem **tbl; in pcim_iomap() local 297 tbl = (void __iomem **)pcim_iomap_table(pdev); in pcim_iomap() 298 if (!tbl || tbl[bar]) /* duplicate mappings not allowed */ in pcim_iomap() 301 tbl[bar] = pci_iomap(pdev, bar, maxlen); in pcim_iomap() 302 return tbl[bar]; in pcim_iomap() 315 void __iomem **tbl; in pcim_iounmap() local 320 tbl = (void __iomem **)pcim_iomap_table(pdev); in pcim_iounmap() 321 BUG_ON(!tbl); in pcim_iounmap() 324 if (tbl[i] == addr) { in pcim_iounmap() 325 tbl[i] = NULL; in pcim_iounmap()
|
D | iommu-common.c | 232 static struct iommu_pool *get_pool(struct iommu_map_table *tbl, in get_pool() argument 236 unsigned long largepool_start = tbl->large_pool.start; in get_pool() 237 bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0); in get_pool() 241 p = &tbl->large_pool; in get_pool() 243 unsigned int pool_nr = entry / tbl->poolsize; in get_pool() 245 BUG_ON(pool_nr >= tbl->nr_pools); in get_pool() 246 p = &tbl->pools[pool_nr]; in get_pool()
|
/linux-4.1.27/drivers/vfio/ |
D | vfio_iommu_spapr_tce.c | 46 struct iommu_table *tbl; member 54 struct iommu_table *tbl = container->tbl; in tce_iommu_enable() local 56 if (!container->tbl) in tce_iommu_enable() 84 npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT; in tce_iommu_enable() 108 if (!container->tbl || !current->mm) in tce_iommu_disable() 112 current->mm->locked_vm -= (container->tbl->it_size << in tce_iommu_disable() 139 WARN_ON(container->tbl && !container->tbl->it_group); in tce_iommu_release() 142 if (container->tbl && container->tbl->it_group) in tce_iommu_release() 143 tce_iommu_detach_group(iommu_data, container->tbl->it_group); in tce_iommu_release() 172 struct iommu_table *tbl = container->tbl; in tce_iommu_ioctl() local [all …]
|
/linux-4.1.27/drivers/net/wireless/mwifiex/ |
D | 11n_rxreorder.c | 89 struct mwifiex_rx_reorder_tbl *tbl, in mwifiex_11n_dispatch_pkt_until_start_win() argument 96 pkt_to_send = (start_win > tbl->start_win) ? in mwifiex_11n_dispatch_pkt_until_start_win() 97 min((start_win - tbl->start_win), tbl->win_size) : in mwifiex_11n_dispatch_pkt_until_start_win() 98 tbl->win_size; in mwifiex_11n_dispatch_pkt_until_start_win() 103 if (tbl->rx_reorder_ptr[i]) { in mwifiex_11n_dispatch_pkt_until_start_win() 104 rx_tmp_ptr = tbl->rx_reorder_ptr[i]; in mwifiex_11n_dispatch_pkt_until_start_win() 105 tbl->rx_reorder_ptr[i] = NULL; in mwifiex_11n_dispatch_pkt_until_start_win() 117 for (i = 0; i < tbl->win_size - pkt_to_send; ++i) { in mwifiex_11n_dispatch_pkt_until_start_win() 118 tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i]; in mwifiex_11n_dispatch_pkt_until_start_win() 119 tbl->rx_reorder_ptr[pkt_to_send + i] = NULL; in mwifiex_11n_dispatch_pkt_until_start_win() [all …]
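
The two loops visible above first dispatch every buffered frame below the new window start and then slide the surviving slots to the front of the reorder array. A simplified standalone sketch of that window shift, with the dispatch step stubbed out and illustrative names; it assumes the new start is not behind the current one:

    #include <stdio.h>

    #define WIN_SIZE 8

    static void dispatch(void *pkt) { printf("dispatch %p\n", pkt); }

    /* Flush everything below new_start, then slide the remaining slots down,
     * mirroring the two loops in the snippet above. */
    static void slide_window(void *slots[WIN_SIZE], unsigned int cur_start, unsigned int new_start)
    {
        unsigned int shift = new_start - cur_start;
        unsigned int i;

        if (shift > WIN_SIZE)
            shift = WIN_SIZE;

        for (i = 0; i < shift; i++) {
            if (slots[i]) {
                dispatch(slots[i]);
                slots[i] = NULL;
            }
        }
        for (i = 0; i < WIN_SIZE - shift; i++) {
            slots[i] = slots[shift + i];
            slots[shift + i] = NULL;
        }
    }

    int main(void)
    {
        void *slots[WIN_SIZE] = { (void *)1, NULL, (void *)3 };
        slide_window(slots, 10, 12);   /* flush seq 10..11, move the rest down */
        return 0;
    }
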
|
D | init.c | 38 struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl; in mwifiex_add_bss_prio_tbl() local 48 spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags); in mwifiex_add_bss_prio_tbl() 49 list_add_tail(&bss_prio->list, &tbl[priv->bss_priority].bss_prio_head); in mwifiex_add_bss_prio_tbl() 50 spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags); in mwifiex_add_bss_prio_tbl()
|
D | 11n.c | 729 struct mwifiex_tx_ba_stream_tbl *tbl, *tmp; in mwifiex_del_tx_ba_stream_tbl_by_ra() local 736 list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list) { in mwifiex_del_tx_ba_stream_tbl_by_ra() 737 if (!memcmp(tbl->ra, ra, ETH_ALEN)) { in mwifiex_del_tx_ba_stream_tbl_by_ra() 740 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl); in mwifiex_del_tx_ba_stream_tbl_by_ra()
|
D | wmm.c | 1017 struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl; in mwifiex_rotate_priolists() local 1021 spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags); in mwifiex_rotate_priolists() 1026 list_move(&tbl[priv->bss_priority].bss_prio_head, in mwifiex_rotate_priolists() 1027 &tbl[priv->bss_priority].bss_prio_cur->list); in mwifiex_rotate_priolists() 1028 spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags); in mwifiex_rotate_priolists()
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/dvm/ |
D | rs.c | 453 static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) in get_expected_tpt() argument 455 if (tbl->expected_tpt) in get_expected_tpt() 456 return tbl->expected_tpt[rs_index]; in get_expected_tpt() 467 static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, in rs_collect_tx_data() argument 478 window = &(tbl->win[scale_index]); in rs_collect_tx_data() 481 tpt = get_expected_tpt(tbl, scale_index); in rs_collect_tx_data() 546 struct iwl_scale_tbl_info *tbl, in rate_n_flags_from_tbl() argument 551 if (is_legacy(tbl->lq_type)) { in rate_n_flags_from_tbl() 556 } else if (is_Ht(tbl->lq_type)) { in rate_n_flags_from_tbl() 563 if (is_siso(tbl->lq_type)) in rate_n_flags_from_tbl() [all …]
|
D | rs.h | 276 #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) argument 277 #define is_siso(tbl) ((tbl) == LQ_SISO) argument 278 #define is_mimo2(tbl) ((tbl) == LQ_MIMO2) argument 279 #define is_mimo3(tbl) ((tbl) == LQ_MIMO3) argument 280 #define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl)) argument 281 #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) argument 282 #define is_a_band(tbl) ((tbl) == LQ_A) argument 283 #define is_g_and(tbl) ((tbl) == LQ_G) argument
|
D | calib.c | 432 __le16 *tbl) in iwl_prepare_legacy_sensitivity_tbl() argument 434 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 436 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 438 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 440 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 443 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 445 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 448 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 450 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 453 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() [all …]
|
/linux-4.1.27/arch/x86/kernel/ |
D | pci-calgary_64.c | 174 static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev); 175 static void calgary_tce_cache_blast(struct iommu_table *tbl); 176 static void calgary_dump_error_regs(struct iommu_table *tbl); 177 static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev); 178 static void calioc2_tce_cache_blast(struct iommu_table *tbl); 179 static void calioc2_dump_error_regs(struct iommu_table *tbl); 180 static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl); 197 static inline int translation_enabled(struct iommu_table *tbl) in translation_enabled() argument 200 return (tbl != NULL); in translation_enabled() 203 static void iommu_range_reserve(struct iommu_table *tbl, in iommu_range_reserve() argument [all …]
|
D | tce_64.c | 49 void tce_build(struct iommu_table *tbl, unsigned long index, in tce_build() argument 60 tp = ((u64*)tbl->it_base) + index; in tce_build() 75 void tce_free(struct iommu_table *tbl, long index, unsigned int npages) in tce_free() argument 79 tp = ((u64*)tbl->it_base) + index; in tce_free() 98 static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl) in tce_table_setparms() argument 104 tbl->it_busno = dev->bus->number; in tce_table_setparms() 107 tbl->it_size = table_size_to_number_of_entries(specified_table_size); in tce_table_setparms() 113 bitmapsz = tbl->it_size / BITS_PER_BYTE; in tce_table_setparms() 121 tbl->it_map = (unsigned long*)bmppages; in tce_table_setparms() 123 memset(tbl->it_map, 0, bitmapsz); in tce_table_setparms() [all …]
|
/linux-4.1.27/net/netfilter/ipvs/ |
D | ip_vs_lblc.c | 171 ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) in ip_vs_lblc_hash() argument 175 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); in ip_vs_lblc_hash() 176 atomic_inc(&tbl->entries); in ip_vs_lblc_hash() 182 ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl, in ip_vs_lblc_get() argument 188 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) in ip_vs_lblc_get() 201 ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr, in ip_vs_lblc_new() argument 206 en = ip_vs_lblc_get(af, tbl, daddr); in ip_vs_lblc_new() 223 ip_vs_lblc_hash(tbl, en); in ip_vs_lblc_new() 234 struct ip_vs_lblc_table *tbl = svc->sched_data; in ip_vs_lblc_flush() local 240 tbl->dead = 1; in ip_vs_lblc_flush() [all …]
|
D | ip_vs_lblcr.c | 334 ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) in ip_vs_lblcr_hash() argument 338 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); in ip_vs_lblcr_hash() 339 atomic_inc(&tbl->entries); in ip_vs_lblcr_hash() 345 ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl, in ip_vs_lblcr_get() argument 351 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) in ip_vs_lblcr_get() 364 ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr, in ip_vs_lblcr_new() argument 369 en = ip_vs_lblcr_get(af, tbl, daddr); in ip_vs_lblcr_new() 385 ip_vs_lblcr_hash(tbl, en); in ip_vs_lblcr_new() 400 struct ip_vs_lblcr_table *tbl = svc->sched_data; in ip_vs_lblcr_flush() local 406 tbl->dead = 1; in ip_vs_lblcr_flush() [all …]
|
D | ip_vs_ctl.c | 3762 struct ctl_table *tbl; in ip_vs_control_net_init_sysctl() local 3770 tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL); in ip_vs_control_net_init_sysctl() 3771 if (tbl == NULL) in ip_vs_control_net_init_sysctl() 3776 tbl[0].procname = NULL; in ip_vs_control_net_init_sysctl() 3778 tbl = vs_vars; in ip_vs_control_net_init_sysctl() 3782 tbl[idx++].data = &ipvs->sysctl_amemthresh; in ip_vs_control_net_init_sysctl() 3784 tbl[idx++].data = &ipvs->sysctl_am_droprate; in ip_vs_control_net_init_sysctl() 3785 tbl[idx++].data = &ipvs->sysctl_drop_entry; in ip_vs_control_net_init_sysctl() 3786 tbl[idx++].data = &ipvs->sysctl_drop_packet; in ip_vs_control_net_init_sysctl() 3788 tbl[idx++].data = &ipvs->sysctl_conntrack; in ip_vs_control_net_init_sysctl() [all …]
|
/linux-4.1.27/net/core/ |
D | neighbour.c | 57 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); 119 static int neigh_forced_gc(struct neigh_table *tbl) in neigh_forced_gc() argument 125 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs); in neigh_forced_gc() 127 write_lock_bh(&tbl->lock); in neigh_forced_gc() 128 nht = rcu_dereference_protected(tbl->nht, in neigh_forced_gc() 129 lockdep_is_held(&tbl->lock)); in neigh_forced_gc() 136 lockdep_is_held(&tbl->lock))) != NULL) { in neigh_forced_gc() 146 lockdep_is_held(&tbl->lock))); in neigh_forced_gc() 158 tbl->last_flush = jiffies; in neigh_forced_gc() 160 write_unlock_bh(&tbl->lock); in neigh_forced_gc() [all …]
|
D | sysctl_net_core.c | 207 struct ctl_table tbl = { in set_default_qdisc() local 215 ret = proc_dostring(&tbl, write, buffer, lenp, ppos); in set_default_qdisc() 422 struct ctl_table *tbl; in sysctl_core_net_init() local 426 tbl = netns_core_table; in sysctl_core_net_init() 428 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); in sysctl_core_net_init() 429 if (tbl == NULL) in sysctl_core_net_init() 432 tbl[0].data = &net->core.sysctl_somaxconn; in sysctl_core_net_init() 436 tbl[0].procname = NULL; in sysctl_core_net_init() 440 net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl); in sysctl_core_net_init() 447 if (tbl != netns_core_table) in sysctl_core_net_init() [all …]
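
sysctl_core_net_init() above kmemdup()s a template table and then repoints each entry's .data field at the per-namespace variable it should control. A standalone sketch of that copy-the-template-and-rebind-pointers pattern, using illustrative structures rather than the kernel's ctl_table:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    /* Illustrative stand-in for a sysctl descriptor: a name plus a pointer
     * to the variable it controls. */
    struct ctl_entry {
        const char *name;
        int *data;
    };

    /* Template shared by all instances; .data is NULL until bound. */
    static const struct ctl_entry template[] = {
        { "somaxconn", NULL },
        { NULL, NULL },             /* terminator */
    };

    struct net_sketch {
        int sysctl_somaxconn;
        struct ctl_entry *tbl;      /* this instance's private copy */
    };

    static int net_sysctl_init(struct net_sketch *net)
    {
        /* kmemdup() equivalent: duplicate the template ... */
        struct ctl_entry *tbl = malloc(sizeof(template));
        if (!tbl)
            return -1;
        memcpy(tbl, template, sizeof(template));

        /* ... then point the copy at this instance's storage. */
        tbl[0].data = &net->sysctl_somaxconn;
        net->tbl = tbl;
        return 0;
    }

    int main(void)
    {
        struct net_sketch net = { .sysctl_somaxconn = 128 };
        if (net_sysctl_init(&net) == 0) {
            *net.tbl[0].data = 4096;    /* writes net.sysctl_somaxconn */
            printf("%d\n", net.sysctl_somaxconn);
            free(net.tbl);
        }
        return 0;
    }
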
|
/linux-4.1.27/drivers/net/wireless/iwlegacy/ |
D | 4965-rs.c | 404 il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx) in il4965_get_expected_tpt() argument 406 if (tbl->expected_tpt) in il4965_get_expected_tpt() 407 return tbl->expected_tpt[rs_idx]; in il4965_get_expected_tpt() 419 il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx, in il4965_rs_collect_tx_data() argument 430 win = &(tbl->win[scale_idx]); in il4965_rs_collect_tx_data() 433 tpt = il4965_get_expected_tpt(tbl, scale_idx); in il4965_rs_collect_tx_data() 497 il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl, in il4965_rate_n_flags_from_tbl() argument 502 if (is_legacy(tbl->lq_type)) { in il4965_rate_n_flags_from_tbl() 507 } else if (is_Ht(tbl->lq_type)) { in il4965_rate_n_flags_from_tbl() 514 if (is_siso(tbl->lq_type)) in il4965_rate_n_flags_from_tbl() [all …]
|
D | 4965-calib.c | 357 __le16 *tbl) in il4965_prepare_legacy_sensitivity_tbl() argument 359 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 361 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 363 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 365 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 368 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 370 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 373 tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck); in il4965_prepare_legacy_sensitivity_tbl() 374 tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm); in il4965_prepare_legacy_sensitivity_tbl() 376 tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] = in il4965_prepare_legacy_sensitivity_tbl() [all …]
|
D | common.h | 2726 #define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A) argument 2727 #define is_siso(tbl) ((tbl) == LQ_SISO) argument 2728 #define is_mimo2(tbl) ((tbl) == LQ_MIMO2) argument 2729 #define is_mimo(tbl) (is_mimo2(tbl)) argument 2730 #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) argument 2731 #define is_a_band(tbl) ((tbl) == LQ_A) argument 2732 #define is_g_and(tbl) ((tbl) == LQ_G) argument
|
/linux-4.1.27/net/mac80211/ |
D | mesh_pathtbl.c | 76 #define for_each_mesh_entry(tbl, node, i) \ argument 77 for (i = 0; i <= tbl->hash_mask; i++) \ 78 hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list) 118 static void __mesh_table_free(struct mesh_table *tbl) in __mesh_table_free() argument 120 kfree(tbl->hash_buckets); in __mesh_table_free() 121 kfree(tbl->hashwlock); in __mesh_table_free() 122 kfree(tbl); in __mesh_table_free() 125 static void mesh_table_free(struct mesh_table *tbl, bool free_leafs) in mesh_table_free() argument 132 mesh_hash = tbl->hash_buckets; in mesh_table_free() 133 for (i = 0; i <= tbl->hash_mask; i++) { in mesh_table_free() [all …]
|
D | sta_info.h | 568 #define for_each_sta_info(local, tbl, _addr, _sta, _tmp) \ argument 569 rht_for_each_entry_rcu(_sta, _tmp, tbl, \ 570 _sta_bucket_idx(tbl, _addr), \
|
D | sta_info.c | 163 const struct bucket_table *tbl; in sta_info_get() local 166 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); in sta_info_get() 168 for_each_sta_info(local, tbl, addr, sta, tmp) { in sta_info_get() 191 const struct bucket_table *tbl; in sta_info_get_bss() local 194 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); in sta_info_get_bss() 196 for_each_sta_info(local, tbl, addr, sta, tmp) { in sta_info_get_bss() 1085 const struct bucket_table *tbl; in ieee80211_find_sta_by_ifaddr() local 1087 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); in ieee80211_find_sta_by_ifaddr() 1093 for_each_sta_info(local, tbl, addr, sta, tmp) { in ieee80211_find_sta_by_ifaddr()
|
D | status.c | 667 const struct bucket_table *tbl; in ieee80211_tx_status() local 676 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); in ieee80211_tx_status() 678 for_each_sta_info(local, tbl, hdr->addr1, sta, tmp) { in ieee80211_tx_status()
|
D | rx.c | 3496 const struct bucket_table *tbl; in __ieee80211_rx_handle_packet() local 3500 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); in __ieee80211_rx_handle_packet() 3502 for_each_sta_info(local, tbl, hdr->addr2, sta, tmp) { in __ieee80211_rx_handle_packet()
|
/linux-4.1.27/net/netfilter/ |
D | xt_repldata.h | 23 } *tbl; \ 25 size_t term_offset = (offsetof(typeof(*tbl), entries[nhooks]) + \ 27 tbl = kzalloc(term_offset + sizeof(*term), GFP_KERNEL); \ 28 if (tbl == NULL) \ 30 term = (struct type##_error *)&(((char *)tbl)[term_offset]); \ 31 strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \ 33 tbl->repl.valid_hooks = hook_mask; \ 34 tbl->repl.num_entries = nhooks + 1; \ 35 tbl->repl.size = nhooks * sizeof(struct type##_standard) + \ 40 tbl->repl.hook_entry[hooknum] = bytes; \ [all …]
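
The macro body above carves one zeroed allocation into a replacement header, nhooks standard entries, and a trailing error entry placed at an aligned term_offset. A loose standalone model of that single-allocation layout; the structure names and fields are placeholders, not the netfilter types:

    #include <stdalign.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct repl_hdr  { char name[16]; unsigned int num_entries; unsigned int size; };
    struct std_entry { unsigned int verdict; };
    struct err_entry { char errname[8]; };

    int main(void)
    {
        unsigned int nhooks = 3;

        /* Header plus nhooks standard entries, then the error terminator,
         * rounded up so the terminator is suitably aligned (mirrors the
         * term_offset computation in the snippet). */
        size_t entries_end = sizeof(struct repl_hdr) + nhooks * sizeof(struct std_entry);
        size_t term_offset = (entries_end + alignof(struct err_entry) - 1)
                             & ~(alignof(struct err_entry) - 1);

        char *buf = calloc(1, term_offset + sizeof(struct err_entry));
        if (!buf)
            return 1;

        struct repl_hdr *hdr = (struct repl_hdr *)buf;
        struct err_entry *term = (struct err_entry *)(buf + term_offset);

        strncpy(hdr->name, "filter", sizeof(hdr->name) - 1);
        hdr->num_entries = nhooks + 1;    /* standard entries plus the terminator */
        hdr->size = (unsigned int)(nhooks * sizeof(struct std_entry) + sizeof(struct err_entry));
        strncpy(term->errname, "ERROR", sizeof(term->errname) - 1);

        printf("terminator at offset %zu\n", term_offset);
        free(buf);
        return 0;
    }
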
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | iommu.h | 81 void (*set_bypass)(struct iommu_table *tbl, bool enable); 89 int get_iommu_order(unsigned long size, struct iommu_table *tbl) in get_iommu_order() argument 91 return __ilog2((size - 1) >> tbl->it_page_shift) + 1; in get_iommu_order() 108 extern void iommu_free_table(struct iommu_table *tbl, const char *node_name); 113 extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, 116 extern void iommu_register_group(struct iommu_table *tbl, 122 static inline void iommu_register_group(struct iommu_table *tbl, in iommu_register_group() argument 150 extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, 155 extern void ppc_iommu_unmap_sg(struct iommu_table *tbl, 161 extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, [all …]
|
D | machdep.h | 68 int (*tce_build)(struct iommu_table *tbl, 74 void (*tce_free)(struct iommu_table *tbl, 77 unsigned long (*tce_get)(struct iommu_table *tbl, 79 void (*tce_flush)(struct iommu_table *tbl); 82 int (*tce_build_rm)(struct iommu_table *tbl, 88 void (*tce_free_rm)(struct iommu_table *tbl, 91 void (*tce_flush_rm)(struct iommu_table *tbl);
|
D | time.h | 66 unsigned long tbl; in get_tbl() local 67 asm volatile("mfspr %0, 0x3dd" : "=r" (tbl)); in get_tbl() 68 return tbl; in get_tbl()
|
D | kvm_host.h | 166 u32 tbu, tbl; member
|
/linux-4.1.27/arch/powerpc/platforms/pseries/ |
D | iommu.c | 54 static void tce_invalidate_pSeries_sw(struct iommu_table *tbl, in tce_invalidate_pSeries_sw() argument 57 u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index; in tce_invalidate_pSeries_sw() 66 if (tbl->it_busno) { in tce_invalidate_pSeries_sw() 70 start |= tbl->it_busno; in tce_invalidate_pSeries_sw() 71 end |= tbl->it_busno; in tce_invalidate_pSeries_sw() 83 static int tce_build_pSeries(struct iommu_table *tbl, long index, in tce_build_pSeries() argument 97 tces = tcep = ((__be64 *)tbl->it_base) + index; in tce_build_pSeries() 108 if (tbl->it_type & TCE_PCI_SWINV_CREATE) in tce_build_pSeries() 109 tce_invalidate_pSeries_sw(tbl, tces, tcep - 1); in tce_build_pSeries() 114 static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) in tce_free_pSeries() argument [all …]
|
/linux-4.1.27/include/net/ |
D | neighbour.h | 74 struct neigh_table *tbl; member 130 #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field) argument 134 struct neigh_table *tbl; member 231 return p->tbl->family; in neigh_parms_family() 239 return (char *)n + n->tbl->entry_size; in neighbour_priv() 270 struct neigh_table *tbl, in ___neigh_lookup_noref() argument 278 struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht); in ___neigh_lookup_noref() 293 static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl, in __neigh_lookup_noref() argument 297 return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev); in __neigh_lookup_noref() 300 void neigh_table_init(int index, struct neigh_table *tbl); [all …]
|
D | udp.h | 261 struct udp_table *tbl); 269 int dif, struct udp_table *tbl);
|
/linux-4.1.27/arch/unicore32/mm/ |
D | proc-macros.S | 97 .macro va2pa, va, pa, tbl, msk, off, err=990f 100 adr \tbl, 910f @ tbl <- table of 1st page table 107 add \tbl, \tbl, \off << #3 @ cmove table pointer 108 ldw \msk, [\tbl+], #0 @ get the mask 109 ldw pc, [\tbl+], #4 113 cntlo \tbl, \msk @ use tbl as temp reg 114 mov \off, \off >> \tbl 116 adr \tbl, 920f @ tbl <- table of 2nd pt 130 andn \tbl, \va, \msk 132 or \pa, \pa, \tbl
|
/linux-4.1.27/net/sctp/ |
D | sysctl.c | 320 struct ctl_table tbl; in proc_sctp_do_hmac_alg() local 326 memset(&tbl, 0, sizeof(struct ctl_table)); in proc_sctp_do_hmac_alg() 329 tbl.data = tmp; in proc_sctp_do_hmac_alg() 330 tbl.maxlen = sizeof(tmp); in proc_sctp_do_hmac_alg() 332 tbl.data = net->sctp.sctp_hmac_alg ? : none; in proc_sctp_do_hmac_alg() 333 tbl.maxlen = strlen(tbl.data); in proc_sctp_do_hmac_alg() 336 ret = proc_dostring(&tbl, write, buffer, lenp, ppos); in proc_sctp_do_hmac_alg() 368 struct ctl_table tbl; in proc_sctp_do_rto_min() local 371 memset(&tbl, 0, sizeof(struct ctl_table)); in proc_sctp_do_rto_min() 372 tbl.maxlen = sizeof(unsigned int); in proc_sctp_do_rto_min() [all …]
|
/linux-4.1.27/scripts/dtc/ |
D | livetree.c | 595 struct reserve_info *ri, **tbl; in sort_reserve_entries() local 606 tbl = xmalloc(n * sizeof(*tbl)); in sort_reserve_entries() 611 tbl[i++] = ri; in sort_reserve_entries() 613 qsort(tbl, n, sizeof(*tbl), cmp_reserve_info); in sort_reserve_entries() 615 bi->reservelist = tbl[0]; in sort_reserve_entries() 617 tbl[i]->next = tbl[i+1]; in sort_reserve_entries() 618 tbl[n-1]->next = NULL; in sort_reserve_entries() 620 free(tbl); in sort_reserve_entries() 636 struct property *prop, **tbl; in sort_properties() local 644 tbl = xmalloc(n * sizeof(*tbl)); in sort_properties() [all …]
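
sort_reserve_entries() and sort_properties() above sort a linked list by gathering the node pointers into a temporary array, qsort()ing it, and relinking the nodes in the sorted order. A standalone sketch of the same technique with an illustrative node type and comparator:

    #include <stdlib.h>
    #include <stdio.h>

    struct node {
        int key;
        struct node *next;
    };

    static int cmp_node(const void *a, const void *b)
    {
        const struct node *na = *(const struct node * const *)a;
        const struct node *nb = *(const struct node * const *)b;
        return (na->key > nb->key) - (na->key < nb->key);
    }

    /* Copy the list into an array of pointers, qsort it, relink, free the array. */
    static struct node *sort_list(struct node *head)
    {
        size_t n = 0, i;
        struct node *p, **tbl;

        for (p = head; p; p = p->next)
            n++;
        if (n < 2)
            return head;

        tbl = malloc(n * sizeof(*tbl));
        if (!tbl)
            return head;    /* leave the list unsorted on allocation failure */

        for (i = 0, p = head; p; p = p->next)
            tbl[i++] = p;

        qsort(tbl, n, sizeof(*tbl), cmp_node);

        for (i = 0; i < n - 1; i++)
            tbl[i]->next = tbl[i + 1];
        tbl[n - 1]->next = NULL;

        head = tbl[0];
        free(tbl);
        return head;
    }

    int main(void)
    {
        struct node c = { 1, NULL }, b = { 3, &c }, a = { 2, &b };
        for (struct node *p = sort_list(&a); p; p = p->next)
            printf("%d\n", p->key);
        return 0;
    }
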
|
/linux-4.1.27/drivers/hwmon/ |
D | ab8500.c | 60 const struct abx500_res_to_temp *tbl = cfg->temp_tbl; in ab8500_voltage_to_temp() local 66 if (r_ntc > tbl[0].resist || r_ntc < tbl[tbl_sz - 1].resist) in ab8500_voltage_to_temp() 69 while (!(r_ntc <= tbl[i].resist && r_ntc > tbl[i + 1].resist) && in ab8500_voltage_to_temp() 74 *temp = tbl[i].temp * 1000 + ((tbl[i + 1].temp - tbl[i].temp) * 1000 * in ab8500_voltage_to_temp() 75 (r_ntc - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist); in ab8500_voltage_to_temp()
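
The conversion above scans a resistance-to-temperature table until two adjacent rows bracket the measured NTC resistance, then interpolates linearly between them in millidegrees. A standalone sketch of that lookup and interpolation; the calibration points are made up for illustration:

    #include <stdio.h>

    struct res_to_temp {
        int temp;     /* degrees C */
        int resist;   /* ohms, strictly decreasing with temperature */
    };

    /* Illustrative calibration points, not real sensor data. */
    static const struct res_to_temp tbl[] = {
        { -20, 67000 },
        {   0, 27000 },
        {  25, 10000 },
        {  60,  2500 },
    };

    /* Returns temperature in millidegrees, or -1 if the resistance is out of range. */
    static int res_to_mdegc(int r_ntc, int *out)
    {
        const int n = (int)(sizeof(tbl) / sizeof(tbl[0]));
        int i = 0;

        if (r_ntc > tbl[0].resist || r_ntc < tbl[n - 1].resist)
            return -1;

        while (!(r_ntc <= tbl[i].resist && r_ntc > tbl[i + 1].resist) && i < n - 2)
            i++;

        /* Linear interpolation between row i and row i + 1, as in the snippet. */
        *out = tbl[i].temp * 1000 +
               ((tbl[i + 1].temp - tbl[i].temp) * 1000 * (r_ntc - tbl[i].resist)) /
               (tbl[i + 1].resist - tbl[i].resist);
        return 0;
    }

    int main(void)
    {
        int mdeg;
        if (res_to_mdegc(5000, &mdeg) == 0)
            printf("%d.%03d C\n", mdeg / 1000, mdeg % 1000);
        return 0;
    }
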
|
/linux-4.1.27/arch/powerpc/platforms/powernv/ |
D | pci.c | 576 static int pnv_tce_build(struct iommu_table *tbl, long index, long npages, in pnv_tce_build() argument 589 tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset; in pnv_tce_build() 590 rpn = __pa(uaddr) >> tbl->it_page_shift; in pnv_tce_build() 594 (rpn++ << tbl->it_page_shift)); in pnv_tce_build() 600 if (tbl->it_type & TCE_PCI_SWINV_CREATE) in pnv_tce_build() 601 pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm); in pnv_tce_build() 606 static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages, in pnv_tce_build_vm() argument 611 return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, in pnv_tce_build_vm() 615 static void pnv_tce_free(struct iommu_table *tbl, long index, long npages, in pnv_tce_free() argument 620 tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset; in pnv_tce_free() [all …]
|
D | pci-ioda.c | 1291 struct iommu_table *tbl; in pnv_pci_ioda2_release_dma_pe() local 1298 tbl = pe->tce32_table; in pnv_pci_ioda2_release_dma_pe() 1299 addr = tbl->it_base; in pnv_pci_ioda2_release_dma_pe() 1313 iommu_free_table(tbl, of_node_full_name(dev->dev.of_node)); in pnv_pci_ioda2_release_dma_pe() 1676 struct iommu_table *tbl, in pnv_pci_ioda1_tce_invalidate() argument 1681 (__be64 __iomem *)tbl->it_index; in pnv_pci_ioda1_tce_invalidate() 1683 const unsigned shift = tbl->it_page_shift; in pnv_pci_ioda1_tce_invalidate() 1689 if (tbl->it_busno) { in pnv_pci_ioda1_tce_invalidate() 1693 start |= tbl->it_busno; in pnv_pci_ioda1_tce_invalidate() 1694 end |= tbl->it_busno; in pnv_pci_ioda1_tce_invalidate() [all …]
|
D | pci.h | 210 extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl, 216 extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
|
/linux-4.1.27/arch/powerpc/kvm/ |
D | book3s_64_vio_hv.c | 57 u64 *tbl; in kvmppc_h_put_tce() local 65 tbl = (u64 *)page_address(page); in kvmppc_h_put_tce() 69 tbl[idx % TCES_PER_PAGE] = tce; in kvmppc_h_put_tce() 89 u64 *tbl; in kvmppc_h_get_tce() local 95 tbl = (u64 *)page_address(page); in kvmppc_h_get_tce() 97 vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE]; in kvmppc_h_get_tce()
|
/linux-4.1.27/net/ipv4/ |
D | sysctl_net_ipv4.c | 152 struct ctl_table tbl = { in proc_tcp_congestion_control() local 160 ret = proc_dostring(&tbl, write, buffer, lenp, ppos); in proc_tcp_congestion_control() 171 struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, }; in proc_tcp_available_congestion_control() local 174 tbl.data = kmalloc(tbl.maxlen, GFP_USER); in proc_tcp_available_congestion_control() 175 if (!tbl.data) in proc_tcp_available_congestion_control() 177 tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX); in proc_tcp_available_congestion_control() 178 ret = proc_dostring(&tbl, write, buffer, lenp, ppos); in proc_tcp_available_congestion_control() 179 kfree(tbl.data); in proc_tcp_available_congestion_control() 188 struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX }; in proc_allowed_congestion_control() local 191 tbl.data = kmalloc(tbl.maxlen, GFP_USER); in proc_allowed_congestion_control() [all …]
|
D | fib_rules.c | 77 struct fib_table *tbl; in fib4_rule_action() local 96 tbl = fib_get_table(rule->fr_net, rule->table); in fib4_rule_action() 97 if (tbl) in fib4_rule_action() 98 err = fib_table_lookup(tbl, &flp->u.ip4, in fib4_rule_action()
|
D | udp_diag.c | 34 static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, in udp_dump_one() argument 47 req->id.idiag_if, tbl); in udp_dump_one() 55 req->id.idiag_if, tbl); in udp_dump_one()
|
D | devinet.c | 2269 struct ctl_table *tbl = ctl_forward_entry; in devinet_init_net() local 2287 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL); in devinet_init_net() 2288 if (!tbl) in devinet_init_net() 2291 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1]; in devinet_init_net() 2292 tbl[0].extra1 = all; in devinet_init_net() 2293 tbl[0].extra2 = net; in devinet_init_net() 2307 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl); in devinet_init_net() 2323 if (tbl != ctl_forward_entry) in devinet_init_net() 2324 kfree(tbl); in devinet_init_net() 2339 struct ctl_table *tbl; in devinet_exit_net() local [all …]
|
D | route.c | 2699 struct ctl_table *tbl; in sysctl_route_net_init() local 2701 tbl = ipv4_route_flush_table; in sysctl_route_net_init() 2703 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); in sysctl_route_net_init() 2704 if (!tbl) in sysctl_route_net_init() 2709 tbl[0].procname = NULL; in sysctl_route_net_init() 2711 tbl[0].extra1 = net; in sysctl_route_net_init() 2713 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); in sysctl_route_net_init() 2719 if (tbl != ipv4_route_flush_table) in sysctl_route_net_init() 2720 kfree(tbl); in sysctl_route_net_init() 2727 struct ctl_table *tbl; in sysctl_route_net_exit() local [all …]
|
D | arp.c | 163 .tbl = &arp_tbl,
|
/linux-4.1.27/arch/sparc/kernel/ |
D | iommu.c | 51 struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl); in iommu_flushall() 104 iommu->tbl.table_map_base = dma_offset; in iommu_table_init() 110 iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node); in iommu_table_init() 111 if (!iommu->tbl.map) in iommu_table_init() 113 memset(iommu->tbl.map, 0, sz); in iommu_table_init() 115 iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT, in iommu_table_init() 150 kfree(iommu->tbl.map); in iommu_table_init() 151 iommu->tbl.map = NULL; in iommu_table_init() 162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in alloc_npages() 230 *dma_addrp = (iommu->tbl.table_map_base + in dma_4u_alloc_coherent() [all …]
|
D | pci_sun4v.c | 159 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in dma_4v_alloc_coherent() 165 *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); in dma_4v_alloc_coherent() 190 iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE); in dma_4v_alloc_coherent() 227 entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); in dma_4v_free_coherent() 229 iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE); in dma_4v_free_coherent() 256 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in dma_4v_map_page() 262 bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); in dma_4v_map_page() 291 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE); in dma_4v_map_page() 318 entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; in dma_4v_unmap_page() 320 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE); in dma_4v_unmap_page() [all …]
|
D | head_32.S | 314 sll %g1, 0x8, %g1 ! make phys addr for l1 tbl 338 sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
|
/linux-4.1.27/drivers/clk/tegra/ |
D | clk.c | 217 void __init tegra_init_from_table(struct tegra_clk_init_table *tbl, in tegra_init_from_table() argument 222 for (; tbl->clk_id < clk_max; tbl++) { in tegra_init_from_table() 223 clk = clks[tbl->clk_id]; in tegra_init_from_table() 226 __func__, PTR_ERR(clk), tbl->clk_id); in tegra_init_from_table() 232 if (tbl->parent_id < clk_max) { in tegra_init_from_table() 233 struct clk *parent = clks[tbl->parent_id]; in tegra_init_from_table() 242 if (tbl->rate) in tegra_init_from_table() 243 if (clk_set_rate(clk, tbl->rate)) { in tegra_init_from_table() 245 __func__, tbl->rate, in tegra_init_from_table() 250 if (tbl->state) in tegra_init_from_table()
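
tegra_init_from_table() above walks a table of per-clock tuples and applies parent, rate, and enable state through the common clock framework, skipping the rest of an entry when a step fails. A standalone sketch of the same table-driven loop with the framework calls replaced by stubs; the struct layout and stub names are illustrative:

    #include <stdio.h>

    struct clk { const char *name; };

    /* Stubs standing in for clk_set_parent()/clk_set_rate()/clk_prepare_enable(). */
    static int set_parent(struct clk *c, struct clk *p) { printf("%s -> parent %s\n", c->name, p->name); return 0; }
    static int set_rate(struct clk *c, unsigned long r) { printf("%s -> %lu Hz\n", c->name, r); return 0; }
    static int enable(struct clk *c)                    { printf("%s -> on\n", c->name); return 0; }

    struct clk_init_entry {
        struct clk *clk;
        struct clk *parent;     /* NULL: leave the parent alone */
        unsigned long rate;     /* 0: leave the rate alone */
        int state;              /* non-zero: enable the clock */
    };

    static void init_from_table(const struct clk_init_entry *tbl, int n)
    {
        for (int i = 0; i < n; i++, tbl++) {
            if (tbl->parent && set_parent(tbl->clk, tbl->parent))
                continue;       /* skip the rest of this entry on error */
            if (tbl->rate && set_rate(tbl->clk, tbl->rate))
                continue;
            if (tbl->state)
                enable(tbl->clk);
        }
    }

    int main(void)
    {
        struct clk pll = { "pll_p" }, uart = { "uarta" };
        const struct clk_init_entry tbl[] = {
            { &uart, &pll, 408000000, 1 },
        };
        init_from_table(tbl, 1);
        return 0;
    }
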
|
D | clk.h | 594 void tegra_init_from_table(struct tegra_clk_init_table *tbl,
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/mvm/ |
D | rs.c | 141 struct iwl_scale_tbl_info *tbl, 153 struct iwl_scale_tbl_info *tbl, in rs_ant_allow() argument 160 struct iwl_scale_tbl_info *tbl, in rs_mimo_allow() argument 190 struct iwl_scale_tbl_info *tbl, in rs_siso_allow() argument 200 struct iwl_scale_tbl_info *tbl, in rs_sgi_allow() argument 203 struct rs_rate *rate = &tbl->rate; in rs_sgi_allow() 547 struct iwl_scale_tbl_info *tbl) in rs_rate_scale_clear_tbl_windows() argument 553 rs_rate_scale_clear_window(&tbl->win[i]); in rs_rate_scale_clear_tbl_windows() 555 for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++) in rs_rate_scale_clear_tbl_windows() 556 rs_rate_scale_clear_window(&tbl->tpc_win[i]); in rs_rate_scale_clear_tbl_windows() [all …]
|
/linux-4.1.27/drivers/media/usb/gspca/gl860/ |
D | gl860.c | 592 int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len) in fetch_validx() argument 597 if (tbl[n].idx != 0xffff) in fetch_validx() 598 ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, in fetch_validx() 599 tbl[n].idx, 0, NULL); in fetch_validx() 600 else if (tbl[n].val == 0xffff) in fetch_validx() 603 msleep(tbl[n].val); in fetch_validx() 608 int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl, in keep_on_fetching_validx() argument 612 if (tbl[n].idx != 0xffff) in keep_on_fetching_validx() 613 ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, tbl[n].idx, in keep_on_fetching_validx() 615 else if (tbl[n].val == 0xffff) in keep_on_fetching_validx() [all …]
|
D | gl860.h | 91 int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len); 92 int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl, 94 void fetch_idxdata(struct gspca_dev *gspca_dev, struct idxdata *tbl, int len);
|
D | gl860-ov9655.c | 216 u8 **tbl; in ov9655_init_post_alt() local 220 tbl = (reso == IMAGE_640) ? tbl_640 : tbl_1280; in ov9655_init_post_alt() 223 tbl_length[0], tbl[0]); in ov9655_init_post_alt() 226 tbl_length[i], tbl[i]); in ov9655_init_post_alt() 228 tbl_length[7], tbl[7]); in ov9655_init_post_alt()
|
/linux-4.1.27/arch/powerpc/platforms/pasemi/ |
D | iommu.c | 88 static int iobmap_build(struct iommu_table *tbl, long index, in iobmap_build() argument 99 bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT; in iobmap_build() 101 ip = ((u32 *)tbl->it_base) + index; in iobmap_build() 117 static void iobmap_free(struct iommu_table *tbl, long index, in iobmap_free() argument 125 bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT; in iobmap_free() 127 ip = ((u32 *)tbl->it_base) + index; in iobmap_free()
|
/linux-4.1.27/arch/x86/include/asm/ |
D | tce.h | 41 extern void tce_build(struct iommu_table *tbl, unsigned long index, 43 extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages); 45 extern void __init free_tce_table(void *tbl);
|
D | calgary.h | 47 void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev); 48 void (*tce_cache_blast)(struct iommu_table *tbl); 49 void (*dump_error_regs)(struct iommu_table *tbl);
|
/linux-4.1.27/net/openvswitch/ |
D | flow_table.c | 489 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, in ovs_flow_tbl_lookup_stats() argument 493 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); in ovs_flow_tbl_lookup_stats() 498 list_for_each_entry_rcu(mask, &tbl->mask_list, list) { in ovs_flow_tbl_lookup_stats() 507 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, in ovs_flow_tbl_lookup() argument 512 return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit); in ovs_flow_tbl_lookup() 515 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, in ovs_flow_tbl_lookup_exact() argument 518 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); in ovs_flow_tbl_lookup_exact() 523 list_for_each_entry(mask, &tbl->mask_list, list) { in ovs_flow_tbl_lookup_exact() 554 struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl, in ovs_flow_tbl_lookup_ufid() argument 557 struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti); in ovs_flow_tbl_lookup_ufid() [all …]
|
D | flow_table.h | 81 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
|
D | flow_netlink.c | 1008 const struct ovs_len_tbl *tbl) in nlattr_set() argument 1015 if (tbl && tbl[nla_type(nla)].len == OVS_ATTR_NESTED) in nlattr_set() 1016 nlattr_set(nla, val, tbl[nla_type(nla)].next); in nlattr_set()
|
/linux-4.1.27/arch/blackfin/kernel/ |
D | cplbinfo.c | 33 struct cplb_entry *tbl; member 55 addr = cdata->tbl[pos].addr; in cplbinfo_show() 56 data = cdata->tbl[pos].data; in cplbinfo_show() 73 cdata->tbl = icplb_tbl[cpu]; in cplbinfo_seq_init() 77 cdata->tbl = dcplb_tbl[cpu]; in cplbinfo_seq_init()
|
/linux-4.1.27/arch/arm64/kernel/ |
D | head.S | 302 .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 305 add \tmp2, \tbl, #PAGE_SIZE 307 str \tmp2, [\tbl, \tmp1, lsl #3] 308 add \tbl, \tbl, #PAGE_SIZE // next level table page 318 .macro create_pgd_entry, tbl, virt, tmp1, tmp2 319 create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2 321 create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2 332 .macro create_block_map, tbl, flags, phys, start, end 339 9999: str \phys, [\tbl, \start, lsl #3] // store the entry
|
/linux-4.1.27/drivers/video/fbdev/riva/ |
D | riva_hw.c | 1344 #define LOAD_FIXED_STATE(tbl,dev) \ 1345 for (i = 0; i < sizeof(tbl##Table##dev)/8; i++) \ 1346 chip->dev[tbl##Table##dev[i][0]] = tbl##Table##dev[i][1] 1347 #define LOAD_FIXED_STATE_8BPP(tbl,dev) \ 1348 for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++) \ 1349 chip->dev[tbl##Table##dev##_8BPP[i][0]] = tbl##Table##dev##_8BPP[i][1] 1350 #define LOAD_FIXED_STATE_15BPP(tbl,dev) \ 1351 for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++) \ 1352 chip->dev[tbl##Table##dev##_15BPP[i][0]] = tbl##Table##dev##_15BPP[i][1] 1353 #define LOAD_FIXED_STATE_16BPP(tbl,dev) \ [all …]
|
/linux-4.1.27/net/decnet/ |
D | dn_rules.c | 72 struct dn_fib_table *tbl; in dn_fib_rule_action() local 92 tbl = dn_fib_get_table(rule->table, 0); in dn_fib_rule_action() 93 if (tbl == NULL) in dn_fib_rule_action() 96 err = tbl->lookup(tbl, fld, (struct dn_fib_res *)arg->result); in dn_fib_rule_action()
|
D | dn_neigh.c | 87 .tbl = &dn_neigh_table,
|
/linux-4.1.27/arch/powerpc/boot/ |
D | cuboot-c2k.c | 36 struct mv64x60_cpu2pci_win *tbl; in c2k_bridge_setup() local 90 tbl = mv64x60_cpu2pci_io; in c2k_bridge_setup() 93 tbl = mv64x60_cpu2pci_mem; in c2k_bridge_setup() 112 pci_base_hi, pci_base_lo, cpu_base, size, tbl); in c2k_bridge_setup()
|
D | prpmc2800.c | 326 struct mv64x60_cpu2pci_win *tbl; in prpmc2800_bridge_setup() local 369 tbl = mv64x60_cpu2pci_io; in prpmc2800_bridge_setup() 372 tbl = mv64x60_cpu2pci_mem; in prpmc2800_bridge_setup() 391 pci_base_lo, cpu_base, size, tbl); in prpmc2800_bridge_setup()
|
/linux-4.1.27/arch/arc/kernel/ |
D | setup.c | 145 const struct cpuinfo_data *tbl; in arc_cpu_mumbojumbo() local 165 for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) { in arc_cpu_mumbojumbo() 166 if ((core->family >= tbl->info.id) && in arc_cpu_mumbojumbo() 167 (core->family <= tbl->up_range)) { in arc_cpu_mumbojumbo() 170 cpu_id, tbl->info.str, isa_nm, in arc_cpu_mumbojumbo() 176 if (tbl->info.id == 0) in arc_cpu_mumbojumbo()
|
/linux-4.1.27/arch/arm64/crypto/ |
D | aes-neon.S | 53 tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b 90 tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */ 120 tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b 121 tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b 136 tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b 138 tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b 140 tbl \in2\().16b, {v16.16b-v19.16b}, \in2\().16b 142 tbl \in3\().16b, {v16.16b-v19.16b}, \in3\().16b 238 tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */ 239 tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ [all …]
|
/linux-4.1.27/firmware/av7110/ |
D | Boot.S | 46 .word tbl // table needed by firmware ROM 47 tbl: .word (endtbl - tbl) label
|
/linux-4.1.27/fs/cifs/ |
D | winucase.c | 643 const wchar_t *tbl; in cifs_toupper() local 650 tbl = toplevel[idx]; in cifs_toupper() 651 if (!tbl) in cifs_toupper() 658 out = tbl[idx]; in cifs_toupper()
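
cifs_toupper() above uses the high byte of a 16-bit character to select a second-level table and the low byte to index into it, so ranges with no uppercase mapping cost only a NULL top-level slot. A standalone sketch of such a two-level lookup with tiny made-up tables rather than the real Unicode case data:

    #include <stdio.h>

    typedef unsigned short wchar16;

    /* Second-level table for the 0x0000-0x00ff range only; filled at startup
     * with an ASCII a-z to A-Z mapping (illustrative, not the cifs tables). */
    static wchar16 level2_00[256];

    /* One top-level slot per high byte; NULL means "no uppercase mapping here". */
    static const wchar16 *toplevel[256] = { level2_00 };

    static wchar16 to_upper(wchar16 in)
    {
        const wchar16 *tbl = toplevel[in >> 8];  /* pick second-level table by high byte */
        if (!tbl)
            return in;                           /* no mapping for this range */
        wchar16 out = tbl[in & 0xff];            /* index it by the low byte */
        return out ? out : in;                   /* zero entry: keep the original */
    }

    int main(void)
    {
        for (int i = 'a'; i <= 'z'; i++)
            level2_00[i] = (wchar16)(i - 'a' + 'A');

        printf("%c\n", to_upper('q'));           /* Q */
        printf("%#x\n", to_upper(0x4e2d));       /* unchanged: no table for that range */
        return 0;
    }
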
|
/linux-4.1.27/arch/powerpc/sysdev/ |
D | dart_iommu.c | 154 static void dart_flush(struct iommu_table *tbl) in dart_flush() argument 163 static int dart_build(struct iommu_table *tbl, long index, in dart_build() argument 174 dp = ((unsigned int*)tbl->it_base) + index; in dart_build() 204 static void dart_free(struct iommu_table *tbl, long index, long npages) in dart_free() argument 215 dp = ((unsigned int *)tbl->it_base) + index; in dart_free()
|
/linux-4.1.27/net/netlink/ |
D | diag.c | 105 struct netlink_table *tbl = &nl_table[protocol]; in __netlink_diag_dump() local 106 struct rhashtable *ht = &tbl->hash; in __netlink_diag_dump() 107 const struct bucket_table *htbl = rht_dereference_rcu(ht->tbl, ht); in __netlink_diag_dump() 142 sk_for_each_bound(sk, &tbl->mc_list) { in __netlink_diag_dump()
|
D | af_netlink.c | 1071 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; in netlink_update_listeners() local 1076 listeners = nl_deref_protected(tbl->listeners); in netlink_update_listeners() 1080 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { in netlink_update_listeners() 1082 sk_for_each_bound(sk, &tbl->mc_list) { in netlink_update_listeners() 2605 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; in __netlink_change_ngroups() local 2610 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { in __netlink_change_ngroups() 2614 old = nl_deref_protected(tbl->listeners); in __netlink_change_ngroups() 2615 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); in __netlink_change_ngroups() 2616 rcu_assign_pointer(tbl->listeners, new); in __netlink_change_ngroups() 2620 tbl->groups = groups; in __netlink_change_ngroups() [all …]
|
/linux-4.1.27/drivers/power/ |
D | ab8500_btemp.c | 471 const struct abx500_res_to_temp *tbl, int tbl_size, int res) in ab8500_btemp_res_to_temp() argument 480 if (res > tbl[0].resist) in ab8500_btemp_res_to_temp() 482 else if (res <= tbl[tbl_size - 1].resist) in ab8500_btemp_res_to_temp() 486 while (!(res <= tbl[i].resist && in ab8500_btemp_res_to_temp() 487 res > tbl[i + 1].resist)) in ab8500_btemp_res_to_temp() 491 temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) * in ab8500_btemp_res_to_temp() 492 (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist); in ab8500_btemp_res_to_temp()
|
D | ab8500_fg.c | 864 const struct abx500_v_to_cap *tbl; in ab8500_fg_volt_to_capacity() local 867 tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl, in ab8500_fg_volt_to_capacity() 871 if (voltage > tbl[i].voltage) in ab8500_fg_volt_to_capacity() 877 tbl[i].voltage, in ab8500_fg_volt_to_capacity() 878 tbl[i].capacity * 10, in ab8500_fg_volt_to_capacity() 879 tbl[i-1].voltage, in ab8500_fg_volt_to_capacity() 880 tbl[i-1].capacity * 10); in ab8500_fg_volt_to_capacity() 916 const struct batres_vs_temp *tbl; in ab8500_fg_battery_resistance() local 919 tbl = di->bm->bat_type[di->bm->batt_id].batres_tbl; in ab8500_fg_battery_resistance() 923 if (di->bat_temp / 10 > tbl[i].temp) in ab8500_fg_battery_resistance() [all …]
|
D | bq24190_charger.c | 213 static u8 bq24190_find_idx(const int tbl[], int tbl_size, int v) in bq24190_find_idx() argument 218 if (v < tbl[i]) in bq24190_find_idx() 278 const int tbl[], int tbl_size, in bq24190_get_field_val() argument 289 *val = tbl[v]; in bq24190_get_field_val() 296 const int tbl[], int tbl_size, in bq24190_set_field_val() argument 301 idx = bq24190_find_idx(tbl, tbl_size, val); in bq24190_set_field_val()
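
bq24190_find_idx() above maps a requested value onto a sorted table by returning the index just before the first entry that exceeds it. A standalone sketch with an illustrative current-limit table:

    #include <stdio.h>

    /* Illustrative, sorted ascending: supported current limits in uA. */
    static const int tbl[] = { 100000, 150000, 500000, 900000, 1500000 };
    #define TBL_SIZE (int)(sizeof(tbl) / sizeof(tbl[0]))

    /* Return the index of the largest table entry that is <= v
     * (clamped to the first entry), mirroring the loop in the snippet. */
    static int find_idx(const int *t, int n, int v)
    {
        int i;
        for (i = 1; i < n; i++)
            if (v < t[i])
                break;
        return i - 1;
    }

    int main(void)
    {
        printf("%d\n", find_idx(tbl, TBL_SIZE, 700000));  /* -> 2, i.e. 500000 uA */
        return 0;
    }
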
|
D | smb347-charger.c | 206 static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val) in hw_to_current() argument 210 return tbl[val]; in hw_to_current() 214 static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val) in current_to_hw() argument 219 if (val < tbl[i]) in current_to_hw()
|
/linux-4.1.27/net/netlabel/ |
D | netlabel_domainhash.c | 48 struct list_head *tbl; member 148 bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt]; in netlbl_domhsh_search() 337 hsh_tbl->tbl = kcalloc(hsh_tbl->size, in netlbl_domhsh_init() 340 if (hsh_tbl->tbl == NULL) { in netlbl_domhsh_init() 345 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); in netlbl_domhsh_init() 397 &rcu_dereference(netlbl_domhsh)->tbl[bkt]); in netlbl_domhsh_add() 772 iter_list = &rcu_dereference(netlbl_domhsh)->tbl[iter_bkt]; in netlbl_domhsh_walk()
|
D | netlabel_unlabeled.c | 77 struct list_head *tbl; member 229 bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]; in netlbl_unlhsh_search_iface() 351 &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]); in netlbl_unlhsh_add_iface() 1204 iter_list = &rcu_dereference(netlbl_unlhsh)->tbl[iter_bkt]; in netlbl_unlabel_staticlist() 1430 hsh_tbl->tbl = kcalloc(hsh_tbl->size, in netlbl_unlabel_init() 1433 if (hsh_tbl->tbl == NULL) { in netlbl_unlabel_init() 1438 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); in netlbl_unlabel_init()
|
/linux-4.1.27/drivers/sbus/char/ |
D | envctrl.c | 328 int scale, char *tbl, char *bufdata) in envctrl_i2c_data_translate() argument 342 bufdata[0] = tbl[data]; in envctrl_i2c_data_translate() 347 sprintf(bufdata,"%d ", (tbl[data] * 10) / (scale)); in envctrl_i2c_data_translate() 368 char *tbl, j = -1; in envctrl_read_cpu_info() local 387 tbl = pchild->tables + pchild->tblprop_array[i].offset; in envctrl_read_cpu_info() 391 tbl, bufdata); in envctrl_read_cpu_info() 403 char *tbl = NULL; in envctrl_read_noncpu_info() local 418 tbl = pchild->tables + pchild->tblprop_array[i].offset; in envctrl_read_noncpu_info() 422 tbl, bufdata); in envctrl_read_noncpu_info()
|
/linux-4.1.27/drivers/net/wireless/ath/wil6210/ |
D | debugfs.c | 326 const struct dbg_off * const tbl) in wil6210_debugfs_init_offset() argument 330 for (i = 0; tbl[i].name; i++) { in wil6210_debugfs_init_offset() 333 switch (tbl[i].type) { in wil6210_debugfs_init_offset() 335 f = debugfs_create_u32(tbl[i].name, tbl[i].mode, dbg, in wil6210_debugfs_init_offset() 336 base + tbl[i].off); in wil6210_debugfs_init_offset() 339 f = debugfs_create_x32(tbl[i].name, tbl[i].mode, dbg, in wil6210_debugfs_init_offset() 340 base + tbl[i].off); in wil6210_debugfs_init_offset() 343 f = wil_debugfs_create_ulong(tbl[i].name, tbl[i].mode, in wil6210_debugfs_init_offset() 344 dbg, base + tbl[i].off); in wil6210_debugfs_init_offset() 347 f = wil_debugfs_create_iomem_x32(tbl[i].name, in wil6210_debugfs_init_offset() [all …]
|
/linux-4.1.27/arch/x86/syscalls/ |
D | Makefile | 8 syscall32 := $(srctree)/$(src)/syscall_32.tbl 9 syscall64 := $(srctree)/$(src)/syscall_64.tbl
|
/linux-4.1.27/arch/arm/kernel/ |
D | entry-common.S | 176 adr tbl, sys_call_table @ load syscall table pointer 187 ldrne tbl, =sys_oabi_call_table 202 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine 242 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine 293 ldrlo pc, [tbl, scno, lsl #2]
|
D | entry-header.S | 399 tbl .req r8 @ syscall table pointer label
|
/linux-4.1.27/drivers/mfd/ |
D | rtsx_pcr.c | 562 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl) in rtsx_pci_set_pull_ctl() argument 568 while (*tbl & 0xFFFF0000) { in rtsx_pci_set_pull_ctl() 570 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl)); in rtsx_pci_set_pull_ctl() 571 tbl++; in rtsx_pci_set_pull_ctl() 583 const u32 *tbl; in rtsx_pci_card_pull_ctl_enable() local 586 tbl = pcr->sd_pull_ctl_enable_tbl; in rtsx_pci_card_pull_ctl_enable() 588 tbl = pcr->ms_pull_ctl_enable_tbl; in rtsx_pci_card_pull_ctl_enable() 592 return rtsx_pci_set_pull_ctl(pcr, tbl); in rtsx_pci_card_pull_ctl_enable() 598 const u32 *tbl; in rtsx_pci_card_pull_ctl_disable() local 601 tbl = pcr->sd_pull_ctl_disable_tbl; in rtsx_pci_card_pull_ctl_disable() [all …]
|
D | menelaus.c | 493 static int menelaus_get_vtg_value(int vtg, const struct menelaus_vtg_value *tbl, in menelaus_get_vtg_value() argument 498 for (i = 0; i < n; i++, tbl++) in menelaus_get_vtg_value() 499 if (tbl->vtg == vtg) in menelaus_get_vtg_value() 500 return tbl->val; in menelaus_get_vtg_value()
|
/linux-4.1.27/arch/powerpc/platforms/cell/ |
D | iommu.c | 167 static int tce_build_cell(struct iommu_table *tbl, long index, long npages, in tce_build_cell() argument 174 container_of(tbl, struct iommu_window, table); in tce_build_cell() 199 io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); in tce_build_cell() 201 for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) in tce_build_cell() 213 static void tce_free_cell(struct iommu_table *tbl, long index, long npages) in tce_free_cell() argument 219 container_of(tbl, struct iommu_window, table); in tce_free_cell() 233 io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); in tce_free_cell()
|
/linux-4.1.27/sound/pci/ |
D | intel8x0m.c | 772 struct ich_pcm_table *tbl, *rec; in snd_intel8x0m_pcm() local 775 tbl = intel_pcms; in snd_intel8x0m_pcm() 780 tbl = nforce_pcms; in snd_intel8x0m_pcm() 784 tbl = ali_pcms; in snd_intel8x0m_pcm() 788 tbl = intel_pcms; in snd_intel8x0m_pcm() 795 rec = tbl + i; in snd_intel8x0m_pcm() 1129 struct ich_reg_info *tbl; in snd_intel8x0m_create() local 1191 tbl = intel_regs; in snd_intel8x0m_create() 1196 ichdev->reg_offset = tbl[i].offset; in snd_intel8x0m_create() 1197 ichdev->int_sta_mask = tbl[i].int_sta_mask; in snd_intel8x0m_create()
|
D | intel8x0.c | 1668 struct ich_pcm_table *tbl, *rec; in snd_intel8x0_pcm() local 1672 tbl = intel_pcms; in snd_intel8x0_pcm() 1678 tbl = nforce_pcms; in snd_intel8x0_pcm() 1684 tbl = ali_pcms; in snd_intel8x0_pcm() 1688 tbl = intel_pcms; in snd_intel8x0_pcm() 1695 rec = tbl + i; in snd_intel8x0_pcm() 3050 struct ich_reg_info *tbl; in snd_intel8x0_create() local 3117 tbl = nforce_regs; in snd_intel8x0_create() 3120 tbl = ali_regs; in snd_intel8x0_create() 3123 tbl = intel_regs; in snd_intel8x0_create() [all …]
|
/linux-4.1.27/drivers/net/bonding/ |
D | bond_options.c | 458 const struct bond_opt_value *tbl; in bond_opt_parse() local 467 tbl = opt->values; in bond_opt_parse() 468 if (!tbl) in bond_opt_parse() 495 for (i = 0; tbl[i].string; i++) { in bond_opt_parse() 498 if (val->value == tbl[i].value) in bond_opt_parse() 499 ret = &tbl[i]; in bond_opt_parse() 502 (tbl[i].flags & BOND_VALFLAG_DEFAULT)) in bond_opt_parse() 503 ret = &tbl[i]; in bond_opt_parse() 505 if (!strcmp(valstr, tbl[i].string)) in bond_opt_parse() 506 ret = &tbl[i]; in bond_opt_parse()
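
bond_opt_parse() above walks an option's value table and accepts a matching string, a matching numeric value, or the entry flagged as the default when neither was supplied. A standalone sketch of that lookup with illustrative flag and type names:

    #include <stdio.h>
    #include <string.h>

    #define VALFLAG_DEFAULT 0x1

    struct opt_value {
        const char *string;     /* NULL terminates the table */
        int value;
        unsigned int flags;
    };

    static const struct opt_value mode_tbl[] = {
        { "round-robin", 0, 0 },
        { "active-backup", 1, VALFLAG_DEFAULT },
        { NULL, 0, 0 },
    };

    /* Look up by string if one was given, otherwise by numeric value;
     * with neither, fall back to the entry carrying the default flag. */
    static const struct opt_value *opt_parse(const struct opt_value *tbl,
                                             const char *str, const int *val)
    {
        for (int i = 0; tbl[i].string; i++) {
            if (str) {
                if (!strcmp(str, tbl[i].string))
                    return &tbl[i];
            } else if (val) {
                if (*val == tbl[i].value)
                    return &tbl[i];
            } else if (tbl[i].flags & VALFLAG_DEFAULT) {
                return &tbl[i];
            }
        }
        return NULL;
    }

    int main(void)
    {
        int one = 1;
        printf("%s\n", opt_parse(mode_tbl, "round-robin", NULL)->string);
        printf("%s\n", opt_parse(mode_tbl, NULL, &one)->string);
        printf("%s\n", opt_parse(mode_tbl, NULL, NULL)->string);
        return 0;
    }
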
|
/linux-4.1.27/sound/core/ |
D | sgbuf.c | 31 #define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN) argument
|
/linux-4.1.27/sound/core/oss/ |
D | mixer_oss.c | 1181 struct snd_mixer_oss_assign_table *tbl; in snd_mixer_oss_proc_write() local 1214 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); in snd_mixer_oss_proc_write() 1215 if (!tbl) in snd_mixer_oss_proc_write() 1217 tbl->oss_id = ch; in snd_mixer_oss_proc_write() 1218 tbl->name = kstrdup(str, GFP_KERNEL); in snd_mixer_oss_proc_write() 1219 if (! tbl->name) { in snd_mixer_oss_proc_write() 1220 kfree(tbl); in snd_mixer_oss_proc_write() 1223 tbl->index = idx; in snd_mixer_oss_proc_write() 1224 if (snd_mixer_oss_build_input(mixer, tbl, 1, 1) <= 0) { in snd_mixer_oss_proc_write() 1225 kfree(tbl->name); in snd_mixer_oss_proc_write() [all …]
|
/linux-4.1.27/arch/x86/tools/ |
D | gen-insn-attr-x86.awk | 156 function print_table(tbl,name,fmt,n) 161 if (tbl[id]) 162 print " [" id "] = " tbl[id] ","
|
/linux-4.1.27/drivers/iommu/ |
D | amd_iommu_v2.c | 296 static void free_pasid_states_level1(struct pasid_state **tbl) in free_pasid_states_level1() argument 301 if (tbl[i] == NULL) in free_pasid_states_level1() 304 free_page((unsigned long)tbl[i]); in free_pasid_states_level1() 308 static void free_pasid_states_level2(struct pasid_state **tbl) in free_pasid_states_level2() argument 314 if (tbl[i] == NULL) in free_pasid_states_level2() 317 ptr = (struct pasid_state **)tbl[i]; in free_pasid_states_level2()
|
D | amd_iommu.c | 1923 static void free_gcr3_tbl_level1(u64 *tbl) in free_gcr3_tbl_level1() argument 1929 if (!(tbl[i] & GCR3_VALID)) in free_gcr3_tbl_level1() 1932 ptr = __va(tbl[i] & PAGE_MASK); in free_gcr3_tbl_level1() 1938 static void free_gcr3_tbl_level2(u64 *tbl) in free_gcr3_tbl_level2() argument 1944 if (!(tbl[i] & GCR3_VALID)) in free_gcr3_tbl_level2() 1947 ptr = __va(tbl[i] & PAGE_MASK); in free_gcr3_tbl_level2()
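The free_gcr3_tbl_level2()/free_gcr3_tbl_level1() excerpts above tear down a two-level table by visiting only slots marked valid and freeing the lower-level page each one points to. The sketch below transplants that pattern to userspace under stated assumptions: 512 slots per level, a DEMO_VALID flag in bit 0, and plain heap pointers in place of the kernel's physical-address/__va() handling.

    /* Two-level table teardown: free each valid level-1 table, then the
     * level-2 table itself. Names and the flag encoding are assumptions. */
    #include <stdlib.h>
    #include <stdint.h>

    #define DEMO_ENTRIES 512
    #define DEMO_VALID   0x1UL

    static void demo_free_level1(uintptr_t *tbl)
    {
            free(tbl);
    }

    static void demo_free_level2(uintptr_t *tbl)
    {
            int i;

            for (i = 0; i < DEMO_ENTRIES; i++) {
                    if (!(tbl[i] & DEMO_VALID))
                            continue;
                    /* strip the flag bit to recover the level-1 pointer */
                    demo_free_level1((uintptr_t *)(tbl[i] & ~DEMO_VALID));
            }
            free(tbl);
    }

    int main(void)
    {
            uintptr_t *l2 = calloc(DEMO_ENTRIES, sizeof(*l2));
            uintptr_t *l1 = calloc(DEMO_ENTRIES, sizeof(*l1));

            if (!l2 || !l1)
                    return 1;
            l2[0] = (uintptr_t)l1 | DEMO_VALID;   /* one populated slot */
            demo_free_level2(l2);
            return 0;
    }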
|
/linux-4.1.27/net/ipv6/ |
D | ip6_fib.c | 1882 struct fib6_table *tbl; member 1932 iter->w.root = &iter->tbl->tb6_root; in ipv6_route_seq_setup_walk() 1941 static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl, in ipv6_route_seq_next_table() argument 1947 if (tbl) { in ipv6_route_seq_next_table() 1948 h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1; in ipv6_route_seq_next_table() 1949 node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist)); in ipv6_route_seq_next_table() 1991 read_lock(&iter->tbl->tb6_lock); in ipv6_route_seq_next() 1993 read_unlock(&iter->tbl->tb6_lock); in ipv6_route_seq_next() 2004 iter->tbl = ipv6_route_seq_next_table(iter->tbl, net); in ipv6_route_seq_next() 2005 if (!iter->tbl) in ipv6_route_seq_next() [all …]
|
D | ndisc.c | 130 .tbl = &nd_tbl,
|
/linux-4.1.27/arch/sparc/include/asm/ |
D | iommu_64.h | 28 struct iommu_map_table tbl; member
|
/linux-4.1.27/scripts/ |
D | checksyscalls.sh | 215 (ignore_list && syscall_list $(dirname $0)/../arch/x86/syscalls/syscall_32.tbl) | \
|
/linux-4.1.27/arch/parisc/include/asm/ |
D | pdc.h | 298 int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl); 306 struct pdc_memory_table *tbl, unsigned long entries);
|
/linux-4.1.27/arch/cris/include/arch-v32/arch/hwregs/ |
D | eth_defs.h | 119 unsigned int tbl : 32; member 126 unsigned int tbl : 32; member
|
/linux-4.1.27/arch/parisc/kernel/ |
D | firmware.c | 873 int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl) in pdc_pci_irt() argument 878 BUG_ON((unsigned long)tbl & 0x7); in pdc_pci_irt() 883 __pa(pdc_result), hpa, __pa(tbl)); in pdc_pci_irt() 982 struct pdc_memory_table *tbl, unsigned long entries) in pdc_mem_mem_table() argument 991 memcpy(tbl, pdc_result2, entries * sizeof(*tbl)); in pdc_mem_mem_table()
|
/linux-4.1.27/drivers/net/wireless/cw1200/ |
D | txrx.h | 26 __le32 tbl[3]; member
|
D | txrx.c | 187 policy->tbl[off] |= __cpu_to_le32(retries << shift); in tx_policy_build() 369 &arg.tbl[arg.num]; in tx_policy_upload() 376 memcpy(dst->rate_count_indices, src->tbl, in tx_policy_upload()
|
D | wsm.h | 1559 struct wsm_tx_rate_retry_policy tbl[8]; member
|
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/ |
D | debug.c | 1118 const struct wmi_target_roam_tbl *tbl; in ath6kl_debug_roam_tbl_event() local 1121 if (len < sizeof(*tbl)) in ath6kl_debug_roam_tbl_event() 1124 tbl = (const struct wmi_target_roam_tbl *) buf; in ath6kl_debug_roam_tbl_event() 1125 num_entries = le16_to_cpu(tbl->num_entries); in ath6kl_debug_roam_tbl_event() 1126 if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) > in ath6kl_debug_roam_tbl_event() 1155 struct wmi_target_roam_tbl *tbl; in ath6kl_roam_table_read() local 1182 tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl; in ath6kl_roam_table_read() 1183 num_entries = le16_to_cpu(tbl->num_entries); in ath6kl_roam_table_read() 1193 le16_to_cpu(tbl->roam_mode)); in ath6kl_roam_table_read() 1196 struct wmi_bss_roam_info *info = &tbl->info[i]; in ath6kl_roam_table_read()
|
/linux-4.1.27/net/802/ |
D | hippi.c | 154 if (p->tbl->family != AF_INET6) in hippi_neigh_setup_dev()
|
/linux-4.1.27/net/tipc/ |
D | socket.c | 2233 const struct bucket_table *tbl; in tipc_sk_reinit() local 2240 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); in tipc_sk_reinit() 2241 for (i = 0; i < tbl->size; i++) { in tipc_sk_reinit() 2242 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) { in tipc_sk_reinit() 2675 const struct bucket_table *tbl; in tipc_nl_sk_dump() local 2683 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); in tipc_nl_sk_dump() 2684 for (; tbl_id < tbl->size; tbl_id++) { in tipc_nl_sk_dump() 2685 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { in tipc_nl_sk_dump()
|
/linux-4.1.27/drivers/staging/media/davinci_vpfe/ |
D | dm365_ipipe_hw.c | 776 struct vpfe_ipipe_3d_lut_entry *tbl; in ipipe_set_3d_lut_regs() local 793 tbl = lut_3d->table; in ipipe_set_3d_lut_regs() 797 val = tbl[i].b & D3_LUT_ENTRY_MASK; in ipipe_set_3d_lut_regs() 798 val |= (tbl[i].g & D3_LUT_ENTRY_MASK) << in ipipe_set_3d_lut_regs() 800 val |= (tbl[i].r & D3_LUT_ENTRY_MASK) << in ipipe_set_3d_lut_regs()
|
/linux-4.1.27/tools/perf/ |
D | builtin-kvm.c | 74 struct exit_reasons_table *tbl, in get_exit_reason() argument 77 while (tbl->reason != NULL) { in get_exit_reason() 78 if (tbl->exit_code == exit_code) in get_exit_reason() 79 return tbl->reason; in get_exit_reason() 80 tbl++; in get_exit_reason()
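The get_exit_reason() excerpt above is a sentinel-terminated linear lookup: it scans {exit_code, reason} pairs until an entry with a NULL reason string ends the table. A minimal sketch of that pattern, with a made-up table and a fallback string, is shown here; it is illustrative only, not perf's actual table.

    /* Sentinel-terminated lookup: stop at the first NULL string. */
    #include <stdio.h>

    struct demo_exit_reason {
            unsigned long code;
            const char *reason;
    };

    static const char *demo_get_exit_reason(const struct demo_exit_reason *tbl,
                                             unsigned long code)
    {
            while (tbl->reason != NULL) {
                    if (tbl->code == code)
                            return tbl->reason;
                    tbl++;
            }
            return "UNKNOWN";
    }

    int main(void)
    {
            static const struct demo_exit_reason demo_tbl[] = {
                    { 0, "EXCEPTION" },
                    { 1, "EXTERNAL_INTERRUPT" },
                    { 0, NULL },        /* sentinel */
            };

            printf("%s\n", demo_get_exit_reason(demo_tbl, 1));
            return 0;
    }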
|
/linux-4.1.27/drivers/net/ethernet/ibm/ |
D | ibmveth.c | 1298 struct iommu_table *tbl; in ibmveth_get_desired_dma() local 1303 tbl = get_iommu_table_base(&vdev->dev); in ibmveth_get_desired_dma() 1307 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl); in ibmveth_get_desired_dma() 1312 ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl); in ibmveth_get_desired_dma() 1320 buff_size, tbl); in ibmveth_get_desired_dma() 1325 rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl); in ibmveth_get_desired_dma()
|
/linux-4.1.27/sound/pci/ice1712/ |
D | ice1724.c | 2310 struct snd_ice1712_card_info * const *tbl, *c; in snd_vt1724_read_eeprom() local 2339 for (tbl = card_tables; *tbl; tbl++) { in snd_vt1724_read_eeprom() 2340 for (c = *tbl; c->name; c++) { in snd_vt1724_read_eeprom() 2624 struct snd_ice1712_card_info * const *tbl, *c; in snd_vt1724_probe() local 2650 for (tbl = card_tables; *tbl; tbl++) { in snd_vt1724_probe() 2651 for (c = *tbl; c->name; c++) { in snd_vt1724_probe()
|
D | ice1712.c | 2290 struct snd_ice1712_card_info * const *tbl, *c; in snd_ice1712_read_eeprom() local 2313 for (tbl = card_tables; *tbl; tbl++) { in snd_ice1712_read_eeprom() 2314 for (c = *tbl; c->subvendor; c++) { in snd_ice1712_read_eeprom() 2635 struct snd_ice1712_card_info * const *tbl, *c; in snd_ice1712_probe() local 2659 for (tbl = card_tables; *tbl; tbl++) { in snd_ice1712_probe() 2660 for (c = *tbl; c->subvendor; c++) { in snd_ice1712_probe()
|
/linux-4.1.27/arch/unicore32/kernel/ |
D | entry.S | 137 tbl .req r22 @ syscall table pointer label 618 ldw tbl, =sys_call_table @ load syscall table pointer 631 ldw pc, [tbl+], scno << #2 @ call sys_* routine 653 ldw pc, [tbl+], scno << #2 @ call sys_* routine
|
/linux-4.1.27/arch/mips/include/asm/txx9/ |
D | tx3927.h | 112 volatile unsigned long tbl; /* +d0 */ member
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/ |
D | l2t.c | 368 int addr_len = neigh->tbl->key_len; in cxgb4_l2t_get() 481 int addr_len = neigh->tbl->key_len; in t4_l2t_update()
|
/linux-4.1.27/drivers/char/tpm/ |
D | tpm_tis.c | 124 struct acpi_table_tpm2 *tbl; in is_fifo() local 132 (struct acpi_table_header **) &tbl); in is_fifo() 138 if (le32_to_cpu(tbl->start_method) != TPM2_START_FIFO) in is_fifo()
|
/linux-4.1.27/fs/nfs/filelayout/ |
D | filelayout.c | 131 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; in filelayout_async_handle_error() local 190 rpc_wake_up(&tbl->slot_tbl_waitq); in filelayout_async_handle_error() 204 rpc_wake_up(&tbl->slot_tbl_waitq); in filelayout_async_handle_error()
|
/linux-4.1.27/sound/pci/ac97/ |
D | ac97_codec.c | 1103 const struct snd_ac97_res_table *tbl; in check_volume_resolution() local 1104 for (tbl = ac97->res_table; tbl->reg; tbl++) { in check_volume_resolution() 1105 if (tbl->reg == reg) { in check_volume_resolution() 1106 *lo_max = tbl->bits & 0xff; in check_volume_resolution() 1107 *hi_max = (tbl->bits >> 8) & 0xff; in check_volume_resolution()
|
/linux-4.1.27/kernel/ |
D | workqueue.c | 5114 cpumask_var_t *tbl; in wq_numa_init() local 5133 tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL); in wq_numa_init() 5134 BUG_ON(!tbl); in wq_numa_init() 5137 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, in wq_numa_init() 5147 cpumask_set_cpu(cpu, tbl[node]); in wq_numa_init() 5150 wq_numa_possible_cpumask = tbl; in wq_numa_init()
|
/linux-4.1.27/drivers/media/platform/s5p-jpeg/ |
D | jpeg-core.c | 702 const unsigned char *tbl, in exynos4_jpeg_set_tbl() argument 709 dword = tbl[i] | in exynos4_jpeg_set_tbl() 710 (tbl[i + 1] << 8) | in exynos4_jpeg_set_tbl() 711 (tbl[i + 2] << 16) | in exynos4_jpeg_set_tbl() 712 (tbl[i + 3] << 24); in exynos4_jpeg_set_tbl()
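The exynos4_jpeg_set_tbl() excerpt above assembles four consecutive table bytes into one little-endian 32-bit word before handing it to the hardware. The following sketch reproduces only that packing step, with a printf standing in for the register write and a hypothetical quantisation-table fragment as input.

    /* Pack table bytes into little-endian 32-bit words, four at a time. */
    #include <stdio.h>
    #include <stdint.h>

    static void demo_set_tbl(const unsigned char *tbl, unsigned int len)
    {
            unsigned int i;
            uint32_t dword;

            for (i = 0; i + 3 < len; i += 4) {
                    dword = tbl[i] |
                            (tbl[i + 1] << 8) |
                            (tbl[i + 2] << 16) |
                            ((uint32_t)tbl[i + 3] << 24);
                    printf("word %u: 0x%08x\n", i / 4, dword);
            }
    }

    int main(void)
    {
            /* hypothetical table fragment */
            const unsigned char demo_qtbl[] = { 16, 11, 10, 16, 24, 40, 51, 61 };

            demo_set_tbl(demo_qtbl, sizeof(demo_qtbl));
            return 0;
    }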
|
/linux-4.1.27/fs/nfs/flexfilelayout/ |
D | flexfilelayout.c | 683 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; in ff_layout_async_handle_error_v4() local 745 rpc_wake_up(&tbl->slot_tbl_waitq); in ff_layout_async_handle_error_v4() 758 rpc_wake_up(&tbl->slot_tbl_waitq); in ff_layout_async_handle_error_v4()
|
/linux-4.1.27/drivers/misc/carma/ |
D | carma-fpga.c | 1277 static unsigned int data_poll(struct file *filp, struct poll_table_struct *tbl) in data_poll() argument 1283 poll_wait(filp, &priv->wait, tbl); in data_poll()
|
/linux-4.1.27/net/sched/ |
D | sch_teql.c | 233 mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev); in __teql_resolve()
|
/linux-4.1.27/net/atm/ |
D | lec.c | 834 static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl, in lec_tbl_walk() argument 840 e = tbl->first; in lec_tbl_walk() 842 e = tbl->first; in lec_tbl_walk()
|
D | clip.c | 293 if (neigh->tbl->family != AF_INET) in clip_constructor()
|
/linux-4.1.27/sound/pci/hda/ |
D | patch_ca0132.c | 4404 struct hda_jack_tbl *tbl; in hp_callback() local 4411 tbl = snd_hda_jack_tbl_get(codec, cb->nid); in hp_callback() 4412 if (tbl) in hp_callback() 4413 tbl->block_report = 1; in hp_callback()
|
D | patch_cirrus.c | 1019 struct hda_jack_callback *tbl) in cs4210_spdif_automute() argument
|
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_cmn.c | 4628 struct msix_entry *tbl; in bnx2x_alloc_mem_bp() local 4684 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL); in bnx2x_alloc_mem_bp() 4685 if (!tbl) in bnx2x_alloc_mem_bp() 4687 bp->msix_table = tbl; in bnx2x_alloc_mem_bp()
|
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmsmac/phy/ |
D | phy_n.c | 14164 struct phytbl_info tbl; in wlc_phy_table_write_nphy() local 14166 tbl.tbl_id = id; in wlc_phy_table_write_nphy() 14167 tbl.tbl_len = len; in wlc_phy_table_write_nphy() 14168 tbl.tbl_offset = offset; in wlc_phy_table_write_nphy() 14169 tbl.tbl_width = width; in wlc_phy_table_write_nphy() 14170 tbl.tbl_ptr = data; in wlc_phy_table_write_nphy() 14171 wlc_phy_write_table_nphy(pi, &tbl); in wlc_phy_table_write_nphy() 14178 struct phytbl_info tbl; in wlc_phy_table_read_nphy() local 14180 tbl.tbl_id = id; in wlc_phy_table_read_nphy() 14181 tbl.tbl_len = len; in wlc_phy_table_read_nphy() [all …]
|
/linux-4.1.27/fs/nfsd/ |
D | nfs4state.c | 2106 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) in find_client_in_id_table() argument 2111 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { in find_client_in_id_table() 2125 struct list_head *tbl = nn->conf_id_hashtbl; in find_confirmed_client() local 2128 return find_client_in_id_table(tbl, clid, sessions); in find_confirmed_client() 2134 struct list_head *tbl = nn->unconf_id_hashtbl; in find_unconfirmed_client() local 2137 return find_client_in_id_table(tbl, clid, sessions); in find_unconfirmed_client()
|
/linux-4.1.27/drivers/s390/net/ |
D | qeth_l3_main.c | 3245 if (np->tbl->family == AF_INET) in qeth_l3_neigh_setup()
|
/linux-4.1.27/drivers/net/ethernet/sun/ |
D | niu.c | 4733 struct rdc_table *tbl = &tp->tables[i]; in niu_init_rdc_groups() local 4739 tbl->rxdma_channel[slot]); in niu_init_rdc_groups()
|
/linux-4.1.27/drivers/net/ethernet/rocker/ |
D | rocker.c | 5010 if (n->tbl != &arp_tbl) in rocker_netevent_event()
|