| /linux-4.4.14/fs/nfs/ |
| D | nfs4session.c |
    26  static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)  in nfs4_init_slot_table() argument
    28  tbl->highest_used_slotid = NFS4_NO_SLOT;  in nfs4_init_slot_table()
    29  spin_lock_init(&tbl->slot_tbl_lock);  in nfs4_init_slot_table()
    30  rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);  in nfs4_init_slot_table()
    31  init_completion(&tbl->complete);  in nfs4_init_slot_table()
    37  static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize)  in nfs4_shrink_slot_table() argument
    40  if (newsize >= tbl->max_slots)  in nfs4_shrink_slot_table()
    43  p = &tbl->slots;  in nfs4_shrink_slot_table()
    51  tbl->max_slots--;  in nfs4_shrink_slot_table()
    60  void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)  in nfs4_slot_tbl_drain_complete() argument
    [all …]
|
| D | nfs4session.h |
    76  extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
    78  extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
    79  extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
    80  extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
    81  extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
    82  bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
    84  void nfs41_wake_slot_table(struct nfs4_slot_table *tbl);
    86  static inline bool nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl)  in nfs4_slot_tbl_draining() argument
    88  return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);  in nfs4_slot_tbl_draining()
    92  extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
    [all …]
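
The prototypes above are the NFSv4 slot-table API (set up, allocate, free, drain). Purely as an illustration of the general idea behind such a table — a bitmap of in-use slot ids plus a "highest used" marker that a drainer can watch — here is a minimal userspace sketch; the names, the single-word bitmap, and the 64-slot limit are assumptions for the example, not the kernel's implementation:

#include <limits.h>
#include <stdio.h>

#define MAX_SLOTS 64
#define NO_SLOT   UINT_MAX

struct slot_table {
	unsigned long long used;     /* bitmap of in-use slot ids (example assumes <= 64 slots) */
	unsigned int highest_used;   /* NO_SLOT when the table is empty */
};

/* Allocate the lowest free slot id, or NO_SLOT if the table is full. */
static unsigned int slot_alloc(struct slot_table *tbl)
{
	for (unsigned int i = 0; i < MAX_SLOTS; i++) {
		if (!(tbl->used & (1ULL << i))) {
			tbl->used |= 1ULL << i;
			if (tbl->highest_used == NO_SLOT || i > tbl->highest_used)
				tbl->highest_used = i;
			return i;
		}
	}
	return NO_SLOT;
}

/* Free a slot and recompute the highest used id so a drainer can tell when it reaches NO_SLOT. */
static void slot_free(struct slot_table *tbl, unsigned int id)
{
	tbl->used &= ~(1ULL << id);
	if (id == tbl->highest_used) {
		tbl->highest_used = NO_SLOT;
		for (unsigned int i = 0; i < id; i++)
			if (tbl->used & (1ULL << i))
				tbl->highest_used = i;
	}
}

int main(void)
{
	struct slot_table tbl = { .used = 0, .highest_used = NO_SLOT };
	unsigned int a = slot_alloc(&tbl), b = slot_alloc(&tbl);
	slot_free(&tbl, a);
	printf("b=%u highest=%u\n", b, tbl.highest_used);
	return 0;
}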
|
| D | callback_proc.c | 319 validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args) in validate_seqid() argument 329 slot = tbl->slots + args->csa_slotid; in validate_seqid() 358 tbl->highest_used_slotid = args->csa_slotid; in validate_seqid() 374 struct nfs4_slot_table *tbl; in referring_call_exists() local 383 tbl = &session->fc_slot_table; in referring_call_exists() 403 spin_lock(&tbl->slot_tbl_lock); in referring_call_exists() 404 status = (test_bit(ref->rc_slotid, tbl->used_slots) && in referring_call_exists() 405 tbl->slots[ref->rc_slotid].seq_nr == in referring_call_exists() 407 spin_unlock(&tbl->slot_tbl_lock); in referring_call_exists() 421 struct nfs4_slot_table *tbl; in nfs4_callback_sequence() local [all …]
|
| D | nfs4client.c |
    275  struct nfs4_slot_table *tbl;  in nfs40_init_client() local
    278  tbl = kzalloc(sizeof(*tbl), GFP_NOFS);  in nfs40_init_client()
    279  if (tbl == NULL)  in nfs40_init_client()
    282  ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE,  in nfs40_init_client()
    285  kfree(tbl);  in nfs40_init_client()
    289  clp->cl_slot_tbl = tbl;  in nfs40_init_client()
|
| D | nfs4state.c | 218 static void nfs4_end_drain_slot_table(struct nfs4_slot_table *tbl) in nfs4_end_drain_slot_table() argument 220 if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) { in nfs4_end_drain_slot_table() 221 spin_lock(&tbl->slot_tbl_lock); in nfs4_end_drain_slot_table() 222 nfs41_wake_slot_table(tbl); in nfs4_end_drain_slot_table() 223 spin_unlock(&tbl->slot_tbl_lock); in nfs4_end_drain_slot_table() 242 static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl) in nfs4_drain_slot_tbl() argument 244 set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state); in nfs4_drain_slot_tbl() 245 spin_lock(&tbl->slot_tbl_lock); in nfs4_drain_slot_tbl() 246 if (tbl->highest_used_slotid != NFS4_NO_SLOT) { in nfs4_drain_slot_tbl() 247 reinit_completion(&tbl->complete); in nfs4_drain_slot_tbl() [all …]
|
| D | callback_xdr.c | 757 struct nfs4_slot_table *tbl = &session->bc_slot_table; in nfs4_callback_free_slot() local 759 spin_lock(&tbl->slot_tbl_lock); in nfs4_callback_free_slot() 764 tbl->highest_used_slotid = NFS4_NO_SLOT; in nfs4_callback_free_slot() 765 nfs4_slot_tbl_drain_complete(tbl); in nfs4_callback_free_slot() 766 spin_unlock(&tbl->slot_tbl_lock); in nfs4_callback_free_slot()
|
| D | nfs4proc.c | 570 int nfs40_setup_sequence(struct nfs4_slot_table *tbl, in nfs40_setup_sequence() argument 581 spin_lock(&tbl->slot_tbl_lock); in nfs40_setup_sequence() 582 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) in nfs40_setup_sequence() 585 slot = nfs4_alloc_slot(tbl); in nfs40_setup_sequence() 591 spin_unlock(&tbl->slot_tbl_lock); in nfs40_setup_sequence() 602 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, in nfs40_setup_sequence() 605 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); in nfs40_setup_sequence() 606 spin_unlock(&tbl->slot_tbl_lock); in nfs40_setup_sequence() 615 struct nfs4_slot_table *tbl; in nfs40_sequence_done() local 620 tbl = slot->table; in nfs40_sequence_done() [all …]
|
| D | nfs4_fs.h | 450 extern int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
|
| /linux-4.4.14/arch/powerpc/kernel/ |
| D | iommu.c | 177 struct iommu_table *tbl, in iommu_range_alloc() argument 211 pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc() 214 pool = &(tbl->large_pool); in iommu_range_alloc() 216 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc() 236 if (limit + tbl->it_offset > mask) { in iommu_range_alloc() 237 limit = mask - tbl->it_offset + 1; in iommu_range_alloc() 244 pool = &(tbl->pools[0]); in iommu_range_alloc() 254 1 << tbl->it_page_shift); in iommu_range_alloc() 256 boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift); in iommu_range_alloc() 259 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc() [all …]
|
| D | dma-iommu.c |
    78  struct iommu_table *tbl = get_iommu_table_base(dev);  in dma_iommu_dma_supported() local
    80  if (!tbl) {  in dma_iommu_dma_supported()
    86  if (tbl->it_offset > (mask >> tbl->it_page_shift)) {  in dma_iommu_dma_supported()
    89  mask, tbl->it_offset << tbl->it_page_shift);  in dma_iommu_dma_supported()
    97  struct iommu_table *tbl = get_iommu_table_base(dev);  in dma_iommu_get_required_mask() local
    99  if (!tbl)  in dma_iommu_get_required_mask()
    102  mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);  in dma_iommu_get_required_mask()
|
| D | vio.c | 521 struct iommu_table *tbl; in vio_dma_iommu_map_page() local 524 tbl = get_iommu_table_base(dev); in vio_dma_iommu_map_page() 525 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) { in vio_dma_iommu_map_page() 532 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); in vio_dma_iommu_map_page() 545 struct iommu_table *tbl; in vio_dma_iommu_unmap_page() local 547 tbl = get_iommu_table_base(dev); in vio_dma_iommu_unmap_page() 550 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); in vio_dma_iommu_unmap_page() 558 struct iommu_table *tbl; in vio_dma_iommu_map_sg() local 563 tbl = get_iommu_table_base(dev); in vio_dma_iommu_map_sg() 565 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl)); in vio_dma_iommu_map_sg() [all …]
|
| D | asm-offsets.c |
    753  arch.timing_exit.tv32.tbl));  in main()
    757  arch.timing_last_enter.tv32.tbl));  in main()
|
| /linux-4.4.14/include/linux/ |
| D | rhashtable.h |
    145  struct bucket_table __rcu *tbl;  member
    162  struct bucket_table *tbl;  member
    205  static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,  in rht_bucket_index() argument
    208  return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);  in rht_bucket_index()
    212  struct rhashtable *ht, const struct bucket_table *tbl,  in rht_key_hashfn() argument
    219  hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);  in rht_key_hashfn()
    224  hash = params.hashfn(key, key_len, tbl->hash_rnd);  in rht_key_hashfn()
    226  hash = jhash(key, key_len, tbl->hash_rnd);  in rht_key_hashfn()
    229  tbl->hash_rnd);  in rht_key_hashfn()
    234  hash = params.hashfn(key, key_len, tbl->hash_rnd);  in rht_key_hashfn()
    [all …]
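
rht_bucket_index() above reduces an already-computed hash to a bucket number with a power-of-two mask. A self-contained sketch of that masking step follows; the mixer function is a toy stand-in for jhash(), and none of the names are the rhashtable API:

#include <stdint.h>
#include <stdio.h>

/* Toy 32-bit mixer standing in for jhash(); any reasonable hash works here. */
static uint32_t mix32(uint32_t x)
{
	x ^= x >> 16; x *= 0x7feb352dU;
	x ^= x >> 15; x *= 0x846ca68bU;
	x ^= x >> 16;
	return x;
}

/* size must be a power of two, so "& (size - 1)" is a cheap modulo. */
static unsigned int bucket_index(uint32_t hash, unsigned int size)
{
	return hash & (size - 1);
}

int main(void)
{
	unsigned int size = 64;                 /* power-of-two bucket count */
	uint32_t key = 12345;
	printf("bucket %u of %u\n", bucket_index(mix32(key), size), size);
	return 0;
}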
|
| /linux-4.4.14/lib/ |
| D | rhashtable.c | 36 const struct bucket_table *tbl, in head_hashfn() argument 39 return rht_head_hashfn(ht, tbl, he, ht->p); in head_hashfn() 51 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) in lockdep_rht_bucket_is_held() argument 53 spinlock_t *lock = rht_bucket_lock(tbl, hash); in lockdep_rht_bucket_is_held() 63 static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, in alloc_bucket_locks() argument 77 size = min_t(unsigned int, size, tbl->size >> 1); in alloc_bucket_locks() 83 tbl->locks = vmalloc(size * sizeof(spinlock_t)); in alloc_bucket_locks() 86 tbl->locks = kmalloc_array(size, sizeof(spinlock_t), in alloc_bucket_locks() 88 if (!tbl->locks) in alloc_bucket_locks() 91 spin_lock_init(&tbl->locks[i]); in alloc_bucket_locks() [all …]
|
| D | devres.c | 288 void __iomem **tbl; in pcim_iomap() local 292 tbl = (void __iomem **)pcim_iomap_table(pdev); in pcim_iomap() 293 if (!tbl || tbl[bar]) /* duplicate mappings not allowed */ in pcim_iomap() 296 tbl[bar] = pci_iomap(pdev, bar, maxlen); in pcim_iomap() 297 return tbl[bar]; in pcim_iomap() 310 void __iomem **tbl; in pcim_iounmap() local 315 tbl = (void __iomem **)pcim_iomap_table(pdev); in pcim_iounmap() 316 BUG_ON(!tbl); in pcim_iounmap() 319 if (tbl[i] == addr) { in pcim_iounmap() 320 tbl[i] = NULL; in pcim_iounmap()
|
| D | iommu-common.c | 228 static struct iommu_pool *get_pool(struct iommu_map_table *tbl, in get_pool() argument 232 unsigned long largepool_start = tbl->large_pool.start; in get_pool() 233 bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0); in get_pool() 237 p = &tbl->large_pool; in get_pool() 239 unsigned int pool_nr = entry / tbl->poolsize; in get_pool() 241 BUG_ON(pool_nr >= tbl->nr_pools); in get_pool() 242 p = &tbl->pools[pool_nr]; in get_pool()
|
| /linux-4.4.14/drivers/net/wireless/iwlwifi/dvm/ |
| D | rs.c | 453 static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) in get_expected_tpt() argument 455 if (tbl->expected_tpt) in get_expected_tpt() 456 return tbl->expected_tpt[rs_index]; in get_expected_tpt() 467 static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, in rs_collect_tx_data() argument 478 window = &(tbl->win[scale_index]); in rs_collect_tx_data() 481 tpt = get_expected_tpt(tbl, scale_index); in rs_collect_tx_data() 546 struct iwl_scale_tbl_info *tbl, in rate_n_flags_from_tbl() argument 551 if (is_legacy(tbl->lq_type)) { in rate_n_flags_from_tbl() 556 } else if (is_Ht(tbl->lq_type)) { in rate_n_flags_from_tbl() 563 if (is_siso(tbl->lq_type)) in rate_n_flags_from_tbl() [all …]
|
| D | rs.h |
    276  #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))  argument
    277  #define is_siso(tbl) ((tbl) == LQ_SISO)  argument
    278  #define is_mimo2(tbl) ((tbl) == LQ_MIMO2)  argument
    279  #define is_mimo3(tbl) ((tbl) == LQ_MIMO3)  argument
    280  #define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))  argument
    281  #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))  argument
    282  #define is_a_band(tbl) ((tbl) == LQ_A)  argument
    283  #define is_g_and(tbl) ((tbl) == LQ_G)  argument
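
The macros above build the broader rate-type predicates out of the narrower ones (is_mimo from is_mimo2/is_mimo3, is_Ht from is_siso/is_mimo). A tiny compile-and-run sketch of the same layering, using a local enum rather than the driver's definitions:

#include <stdio.h>

enum link_quality { LQ_A, LQ_G, LQ_SISO, LQ_MIMO2, LQ_MIMO3 };

#define is_legacy(t) (((t) == LQ_G) || ((t) == LQ_A))
#define is_siso(t)   ((t) == LQ_SISO)
#define is_mimo2(t)  ((t) == LQ_MIMO2)
#define is_mimo3(t)  ((t) == LQ_MIMO3)
#define is_mimo(t)   (is_mimo2(t) || is_mimo3(t))   /* broader checks reuse narrower ones */
#define is_Ht(t)     (is_siso(t) || is_mimo(t))

int main(void)
{
	enum link_quality t = LQ_MIMO3;
	printf("legacy=%d ht=%d\n", is_legacy(t), is_Ht(t));
	return 0;
}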
|
| D | calib.c | 432 __le16 *tbl) in iwl_prepare_legacy_sensitivity_tbl() argument 434 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 436 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 438 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 440 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 443 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 445 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 448 tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 450 tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() 453 tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = in iwl_prepare_legacy_sensitivity_tbl() [all …]
|
| /linux-4.4.14/arch/x86/kernel/ |
| D | pci-calgary_64.c | 174 static void calgary_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev); 175 static void calgary_tce_cache_blast(struct iommu_table *tbl); 176 static void calgary_dump_error_regs(struct iommu_table *tbl); 177 static void calioc2_handle_quirks(struct iommu_table *tbl, struct pci_dev *dev); 178 static void calioc2_tce_cache_blast(struct iommu_table *tbl); 179 static void calioc2_dump_error_regs(struct iommu_table *tbl); 180 static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl); 197 static inline int translation_enabled(struct iommu_table *tbl) in translation_enabled() argument 200 return (tbl != NULL); in translation_enabled() 203 static void iommu_range_reserve(struct iommu_table *tbl, in iommu_range_reserve() argument [all …]
|
| D | tce_64.c | 49 void tce_build(struct iommu_table *tbl, unsigned long index, in tce_build() argument 60 tp = ((u64*)tbl->it_base) + index; in tce_build() 75 void tce_free(struct iommu_table *tbl, long index, unsigned int npages) in tce_free() argument 79 tp = ((u64*)tbl->it_base) + index; in tce_free() 98 static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl) in tce_table_setparms() argument 104 tbl->it_busno = dev->bus->number; in tce_table_setparms() 107 tbl->it_size = table_size_to_number_of_entries(specified_table_size); in tce_table_setparms() 113 bitmapsz = tbl->it_size / BITS_PER_BYTE; in tce_table_setparms() 121 tbl->it_map = (unsigned long*)bmppages; in tce_table_setparms() 123 memset(tbl->it_map, 0, bitmapsz); in tce_table_setparms() [all …]
|
| /linux-4.4.14/drivers/net/wireless/mwifiex/ |
| D | 11n_rxreorder.c | 89 struct mwifiex_rx_reorder_tbl *tbl, in mwifiex_11n_dispatch_pkt_until_start_win() argument 96 pkt_to_send = (start_win > tbl->start_win) ? in mwifiex_11n_dispatch_pkt_until_start_win() 97 min((start_win - tbl->start_win), tbl->win_size) : in mwifiex_11n_dispatch_pkt_until_start_win() 98 tbl->win_size; in mwifiex_11n_dispatch_pkt_until_start_win() 103 if (tbl->rx_reorder_ptr[i]) { in mwifiex_11n_dispatch_pkt_until_start_win() 104 rx_tmp_ptr = tbl->rx_reorder_ptr[i]; in mwifiex_11n_dispatch_pkt_until_start_win() 105 tbl->rx_reorder_ptr[i] = NULL; in mwifiex_11n_dispatch_pkt_until_start_win() 117 for (i = 0; i < tbl->win_size - pkt_to_send; ++i) { in mwifiex_11n_dispatch_pkt_until_start_win() 118 tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i]; in mwifiex_11n_dispatch_pkt_until_start_win() 119 tbl->rx_reorder_ptr[pkt_to_send + i] = NULL; in mwifiex_11n_dispatch_pkt_until_start_win() [all …]
|
| D | init.c | 38 struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl; in mwifiex_add_bss_prio_tbl() local 48 spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags); in mwifiex_add_bss_prio_tbl() 49 list_add_tail(&bss_prio->list, &tbl[priv->bss_priority].bss_prio_head); in mwifiex_add_bss_prio_tbl() 50 spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags); in mwifiex_add_bss_prio_tbl()
|
| D | 11n.c | 760 struct mwifiex_tx_ba_stream_tbl *tbl, *tmp; in mwifiex_del_tx_ba_stream_tbl_by_ra() local 767 list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list) { in mwifiex_del_tx_ba_stream_tbl_by_ra() 768 if (!memcmp(tbl->ra, ra, ETH_ALEN)) { in mwifiex_del_tx_ba_stream_tbl_by_ra() 771 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl); in mwifiex_del_tx_ba_stream_tbl_by_ra()
|
| D | wmm.c | 1166 struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl; in mwifiex_rotate_priolists() local 1170 spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags); in mwifiex_rotate_priolists() 1175 list_move(&tbl[priv->bss_priority].bss_prio_head, in mwifiex_rotate_priolists() 1176 &tbl[priv->bss_priority].bss_prio_cur->list); in mwifiex_rotate_priolists() 1177 spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags); in mwifiex_rotate_priolists()
|
| /linux-4.4.14/net/netfilter/ipvs/ |
| D | ip_vs_lblc.c | 171 ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) in ip_vs_lblc_hash() argument 175 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); in ip_vs_lblc_hash() 176 atomic_inc(&tbl->entries); in ip_vs_lblc_hash() 182 ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl, in ip_vs_lblc_get() argument 188 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) in ip_vs_lblc_get() 201 ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr, in ip_vs_lblc_new() argument 206 en = ip_vs_lblc_get(af, tbl, daddr); in ip_vs_lblc_new() 223 ip_vs_lblc_hash(tbl, en); in ip_vs_lblc_new() 234 struct ip_vs_lblc_table *tbl = svc->sched_data; in ip_vs_lblc_flush() local 240 tbl->dead = 1; in ip_vs_lblc_flush() [all …]
|
| D | ip_vs_lblcr.c | 334 ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) in ip_vs_lblcr_hash() argument 338 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]); in ip_vs_lblcr_hash() 339 atomic_inc(&tbl->entries); in ip_vs_lblcr_hash() 345 ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl, in ip_vs_lblcr_get() argument 351 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list) in ip_vs_lblcr_get() 364 ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr, in ip_vs_lblcr_new() argument 369 en = ip_vs_lblcr_get(af, tbl, daddr); in ip_vs_lblcr_new() 385 ip_vs_lblcr_hash(tbl, en); in ip_vs_lblcr_new() 400 struct ip_vs_lblcr_table *tbl = svc->sched_data; in ip_vs_lblcr_flush() local 406 tbl->dead = 1; in ip_vs_lblcr_flush() [all …]
|
| D | ip_vs_ctl.c | 3843 struct ctl_table *tbl; in ip_vs_control_net_init_sysctl() local 3851 tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL); in ip_vs_control_net_init_sysctl() 3852 if (tbl == NULL) in ip_vs_control_net_init_sysctl() 3857 tbl[0].procname = NULL; in ip_vs_control_net_init_sysctl() 3859 tbl = vs_vars; in ip_vs_control_net_init_sysctl() 3862 if (tbl[idx].proc_handler == proc_do_defense_mode) in ip_vs_control_net_init_sysctl() 3863 tbl[idx].extra2 = ipvs; in ip_vs_control_net_init_sysctl() 3867 tbl[idx++].data = &ipvs->sysctl_amemthresh; in ip_vs_control_net_init_sysctl() 3869 tbl[idx++].data = &ipvs->sysctl_am_droprate; in ip_vs_control_net_init_sysctl() 3870 tbl[idx++].data = &ipvs->sysctl_drop_entry; in ip_vs_control_net_init_sysctl() [all …]
|
| /linux-4.4.14/net/core/ |
| D | neighbour.c | 57 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); 119 static int neigh_forced_gc(struct neigh_table *tbl) in neigh_forced_gc() argument 125 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs); in neigh_forced_gc() 127 write_lock_bh(&tbl->lock); in neigh_forced_gc() 128 nht = rcu_dereference_protected(tbl->nht, in neigh_forced_gc() 129 lockdep_is_held(&tbl->lock)); in neigh_forced_gc() 136 lockdep_is_held(&tbl->lock))) != NULL) { in neigh_forced_gc() 146 lockdep_is_held(&tbl->lock))); in neigh_forced_gc() 158 tbl->last_flush = jiffies; in neigh_forced_gc() 160 write_unlock_bh(&tbl->lock); in neigh_forced_gc() [all …]
|
| D | sysctl_net_core.c | 207 struct ctl_table tbl = { in set_default_qdisc() local 215 ret = proc_dostring(&tbl, write, buffer, lenp, ppos); in set_default_qdisc() 422 struct ctl_table *tbl; in sysctl_core_net_init() local 426 tbl = netns_core_table; in sysctl_core_net_init() 428 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); in sysctl_core_net_init() 429 if (tbl == NULL) in sysctl_core_net_init() 432 tbl[0].data = &net->core.sysctl_somaxconn; in sysctl_core_net_init() 436 tbl[0].procname = NULL; in sysctl_core_net_init() 440 net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl); in sysctl_core_net_init() 447 if (tbl != netns_core_table) in sysctl_core_net_init() [all …]
|
| /linux-4.4.14/drivers/vfio/ |
| D | vfio_iommu_spapr_tce.c | 140 static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl) in tce_iommu_userspace_view_alloc() argument 142 unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * in tce_iommu_userspace_view_alloc() 143 tbl->it_size, PAGE_SIZE); in tce_iommu_userspace_view_alloc() 147 BUG_ON(tbl->it_userspace); in tce_iommu_userspace_view_alloc() 158 tbl->it_userspace = uas; in tce_iommu_userspace_view_alloc() 163 static void tce_iommu_userspace_view_free(struct iommu_table *tbl) in tce_iommu_userspace_view_free() argument 165 unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) * in tce_iommu_userspace_view_free() 166 tbl->it_size, PAGE_SIZE); in tce_iommu_userspace_view_free() 168 if (!tbl->it_userspace) in tce_iommu_userspace_view_free() 171 vfree(tbl->it_userspace); in tce_iommu_userspace_view_free() [all …]
|
| /linux-4.4.14/drivers/net/wireless/iwlegacy/ |
| D | 4965-rs.c | 404 il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx) in il4965_get_expected_tpt() argument 406 if (tbl->expected_tpt) in il4965_get_expected_tpt() 407 return tbl->expected_tpt[rs_idx]; in il4965_get_expected_tpt() 419 il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx, in il4965_rs_collect_tx_data() argument 430 win = &(tbl->win[scale_idx]); in il4965_rs_collect_tx_data() 433 tpt = il4965_get_expected_tpt(tbl, scale_idx); in il4965_rs_collect_tx_data() 497 il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl, in il4965_rate_n_flags_from_tbl() argument 502 if (is_legacy(tbl->lq_type)) { in il4965_rate_n_flags_from_tbl() 507 } else if (is_Ht(tbl->lq_type)) { in il4965_rate_n_flags_from_tbl() 514 if (is_siso(tbl->lq_type)) in il4965_rate_n_flags_from_tbl() [all …]
|
| D | 4965-calib.c | 357 __le16 *tbl) in il4965_prepare_legacy_sensitivity_tbl() argument 359 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 361 tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 363 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 365 tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 368 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 370 tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] = in il4965_prepare_legacy_sensitivity_tbl() 373 tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck); in il4965_prepare_legacy_sensitivity_tbl() 374 tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm); in il4965_prepare_legacy_sensitivity_tbl() 376 tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] = in il4965_prepare_legacy_sensitivity_tbl() [all …]
|
| D | common.h | 2726 #define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A) argument 2727 #define is_siso(tbl) ((tbl) == LQ_SISO) argument 2728 #define is_mimo2(tbl) ((tbl) == LQ_MIMO2) argument 2729 #define is_mimo(tbl) (is_mimo2(tbl)) argument 2730 #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) argument 2731 #define is_a_band(tbl) ((tbl) == LQ_A) argument 2732 #define is_g_and(tbl) ((tbl) == LQ_G) argument
|
| /linux-4.4.14/net/mac80211/ |
| D | mesh_pathtbl.c | 76 #define for_each_mesh_entry(tbl, node, i) \ argument 77 for (i = 0; i <= tbl->hash_mask; i++) \ 78 hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list) 118 static void __mesh_table_free(struct mesh_table *tbl) in __mesh_table_free() argument 120 kfree(tbl->hash_buckets); in __mesh_table_free() 121 kfree(tbl->hashwlock); in __mesh_table_free() 122 kfree(tbl); in __mesh_table_free() 125 static void mesh_table_free(struct mesh_table *tbl, bool free_leafs) in mesh_table_free() argument 132 mesh_hash = tbl->hash_buckets; in mesh_table_free() 133 for (i = 0; i <= tbl->hash_mask; i++) { in mesh_table_free() [all …]
|
| D | sta_info.h | 601 #define for_each_sta_info(local, tbl, _addr, _sta, _tmp) \ argument 602 rht_for_each_entry_rcu(_sta, _tmp, tbl, \ 603 _sta_bucket_idx(tbl, _addr), \
|
| D | sta_info.c | 164 const struct bucket_table *tbl; in sta_info_get() local 167 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); in sta_info_get() 169 for_each_sta_info(local, tbl, addr, sta, tmp) { in sta_info_get() 192 const struct bucket_table *tbl; in sta_info_get_bss() local 195 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); in sta_info_get_bss() 197 for_each_sta_info(local, tbl, addr, sta, tmp) { in sta_info_get_bss() 1102 const struct bucket_table *tbl; in ieee80211_find_sta_by_ifaddr() local 1104 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); in ieee80211_find_sta_by_ifaddr() 1110 for_each_sta_info(local, tbl, addr, sta, tmp) { in ieee80211_find_sta_by_ifaddr()
|
| D | status.c | 747 const struct bucket_table *tbl; in ieee80211_tx_status() local 756 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); in ieee80211_tx_status() 758 for_each_sta_info(local, tbl, hdr->addr1, sta, tmp) { in ieee80211_tx_status()
|
| D | rx.c | 3496 const struct bucket_table *tbl; in __ieee80211_rx_handle_packet() local 3500 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); in __ieee80211_rx_handle_packet() 3502 for_each_sta_info(local, tbl, hdr->addr2, sta, tmp) { in __ieee80211_rx_handle_packet()
|
| /linux-4.4.14/net/netfilter/ |
| D | xt_repldata.h |
    23  } *tbl; \
    25  size_t term_offset = (offsetof(typeof(*tbl), entries[nhooks]) + \
    27  tbl = kzalloc(term_offset + sizeof(*term), GFP_KERNEL); \
    28  if (tbl == NULL) \
    30  term = (struct type##_error *)&(((char *)tbl)[term_offset]); \
    31  strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \
    33  tbl->repl.valid_hooks = hook_mask; \
    34  tbl->repl.num_entries = nhooks + 1; \
    35  tbl->repl.size = nhooks * sizeof(struct type##_standard) + \
    40  tbl->repl.hook_entry[hooknum] = bytes; \
    [all …]
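
xt_repldata.h above sizes one zeroed allocation to hold a header, a per-hook entries[] array, and an aligned error terminator, locating the terminator with offsetof()-style arithmetic. A hedged userspace sketch of that single-allocation layout, with simplified toy structures and calloc standing in for kzalloc:

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { int hook; };
struct error_entry { char msg[16]; };

struct repl {
	unsigned int num_entries;
	struct entry entries[];          /* one slot per hook */
};

int main(void)
{
	unsigned int nhooks = 3;
	/* End of entries[], rounded up so the terminator is properly aligned. */
	size_t term_off = offsetof(struct repl, entries) + nhooks * sizeof(struct entry);
	term_off = (term_off + alignof(struct error_entry) - 1) & ~(alignof(struct error_entry) - 1);

	struct repl *tbl = calloc(1, term_off + sizeof(struct error_entry));
	if (!tbl)
		return 1;
	tbl->num_entries = nhooks + 1;   /* the hooks plus the terminator */

	struct error_entry *term = (struct error_entry *)((char *)tbl + term_off);
	strcpy(term->msg, "ERROR");
	printf("%u entries, terminator at offset %zu: %s\n", tbl->num_entries, term_off, term->msg);
	free(tbl);
	return 0;
}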
|
| /linux-4.4.14/arch/powerpc/include/asm/ |
| D | iommu.h | 52 int (*set)(struct iommu_table *tbl, 63 int (*exchange)(struct iommu_table *tbl, 68 void (*clear)(struct iommu_table *tbl, 71 unsigned long (*get)(struct iommu_table *tbl, long index); 72 void (*flush)(struct iommu_table *tbl); 73 void (*free)(struct iommu_table *tbl); 119 #define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \ argument 120 ((tbl)->it_userspace ? \ 121 &((tbl)->it_userspace[(entry) - (tbl)->it_offset]) : \ 126 int get_iommu_order(unsigned long size, struct iommu_table *tbl) in get_iommu_order() argument [all …]
|
| D | time.h |
    66  unsigned long tbl;  in get_tbl() local
    67  asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));  in get_tbl()
    68  return tbl;  in get_tbl()
|
| D | kvm_host.h | 168 u32 tbu, tbl; member
|
| /linux-4.4.14/arch/powerpc/platforms/pseries/ |
| D | iommu.c | 59 struct iommu_table *tbl = NULL; in iommu_pseries_alloc_group() local 67 tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node); in iommu_pseries_alloc_group() 68 if (!tbl) in iommu_pseries_alloc_group() 76 INIT_LIST_HEAD_RCU(&tbl->it_group_list); in iommu_pseries_alloc_group() 78 list_add_rcu(&tgl->next, &tbl->it_group_list); in iommu_pseries_alloc_group() 80 table_group->tables[0] = tbl; in iommu_pseries_alloc_group() 87 kfree(tbl); in iommu_pseries_alloc_group() 95 struct iommu_table *tbl; in iommu_pseries_free_group() local 103 tbl = table_group->tables[0]; in iommu_pseries_free_group() 105 tgl = list_first_entry_or_null(&tbl->it_group_list, in iommu_pseries_free_group() [all …]
|
| /linux-4.4.14/include/net/ |
| D | neighbour.h | 74 struct neigh_table *tbl; member 131 #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field) argument 135 struct neigh_table *tbl; member 232 return p->tbl->family; in neigh_parms_family() 240 return (char *)n + n->tbl->entry_size; in neighbour_priv() 271 struct neigh_table *tbl, in ___neigh_lookup_noref() argument 279 struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht); in ___neigh_lookup_noref() 294 static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl, in __neigh_lookup_noref() argument 298 return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev); in __neigh_lookup_noref() 301 void neigh_table_init(int index, struct neigh_table *tbl); [all …]
|
| D | udp.h | 261 struct udp_table *tbl); 269 int dif, struct udp_table *tbl);
|
| /linux-4.4.14/arch/unicore32/mm/ |
| D | proc-macros.S | 97 .macro va2pa, va, pa, tbl, msk, off, err=990f 100 adr \tbl, 910f @ tbl <- table of 1st page table 107 add \tbl, \tbl, \off << #3 @ cmove table pointer 108 ldw \msk, [\tbl+], #0 @ get the mask 109 ldw pc, [\tbl+], #4 113 cntlo \tbl, \msk @ use tbl as temp reg 114 mov \off, \off >> \tbl 116 adr \tbl, 920f @ tbl <- table of 2nd pt 130 andn \tbl, \va, \msk 132 or \pa, \pa, \tbl
|
| /linux-4.4.14/net/sctp/ |
| D | sysctl.c | 320 struct ctl_table tbl; in proc_sctp_do_hmac_alg() local 326 memset(&tbl, 0, sizeof(struct ctl_table)); in proc_sctp_do_hmac_alg() 329 tbl.data = tmp; in proc_sctp_do_hmac_alg() 330 tbl.maxlen = sizeof(tmp); in proc_sctp_do_hmac_alg() 332 tbl.data = net->sctp.sctp_hmac_alg ? : none; in proc_sctp_do_hmac_alg() 333 tbl.maxlen = strlen(tbl.data); in proc_sctp_do_hmac_alg() 336 ret = proc_dostring(&tbl, write, buffer, lenp, ppos); in proc_sctp_do_hmac_alg() 368 struct ctl_table tbl; in proc_sctp_do_rto_min() local 371 memset(&tbl, 0, sizeof(struct ctl_table)); in proc_sctp_do_rto_min() 372 tbl.maxlen = sizeof(unsigned int); in proc_sctp_do_rto_min() [all …]
|
| /linux-4.4.14/scripts/dtc/ |
| D | livetree.c |
    597  struct reserve_info *ri, **tbl;  in sort_reserve_entries() local
    608  tbl = xmalloc(n * sizeof(*tbl));  in sort_reserve_entries()
    613  tbl[i++] = ri;  in sort_reserve_entries()
    615  qsort(tbl, n, sizeof(*tbl), cmp_reserve_info);  in sort_reserve_entries()
    617  bi->reservelist = tbl[0];  in sort_reserve_entries()
    619  tbl[i]->next = tbl[i+1];  in sort_reserve_entries()
    620  tbl[n-1]->next = NULL;  in sort_reserve_entries()
    622  free(tbl);  in sort_reserve_entries()
    638  struct property *prop, **tbl;  in sort_properties() local
    646  tbl = xmalloc(n * sizeof(*tbl));  in sort_properties()
    [all …]
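
sort_reserve_entries() and sort_properties() above sort a linked list by copying the node pointers into a scratch array, handing that array to qsort(), and then relinking the nodes in array order. The same trick in a standalone sketch with a toy node type:

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

static int cmp_node(const void *a, const void *b)
{
	const struct node *na = *(const struct node * const *)a;
	const struct node *nb = *(const struct node * const *)b;
	return (na->val > nb->val) - (na->val < nb->val);
}

static struct node *sort_list(struct node *head)
{
	size_t n = 0;
	for (struct node *p = head; p; p = p->next)
		n++;
	if (n < 2)
		return head;

	struct node **tbl = malloc(n * sizeof(*tbl));
	if (!tbl)
		return head;
	size_t i = 0;
	for (struct node *p = head; p; p = p->next)
		tbl[i++] = p;                    /* copy pointers into the scratch array */

	qsort(tbl, n, sizeof(*tbl), cmp_node);

	for (i = 0; i + 1 < n; i++)              /* relink in sorted order */
		tbl[i]->next = tbl[i + 1];
	tbl[n - 1]->next = NULL;
	head = tbl[0];
	free(tbl);
	return head;
}

int main(void)
{
	struct node c = { 1, NULL }, b = { 3, &c }, a = { 2, &b };
	for (struct node *p = sort_list(&a); p; p = p->next)
		printf("%d ", p->val);
	putchar('\n');
	return 0;
}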
|
| /linux-4.4.14/arch/powerpc/platforms/powernv/ |
| D | pci.c | 576 static __be64 *pnv_tce(struct iommu_table *tbl, long idx) in pnv_tce() argument 578 __be64 *tmp = ((__be64 *)tbl->it_base); in pnv_tce() 579 int level = tbl->it_indirect_levels; in pnv_tce() 580 const long shift = ilog2(tbl->it_level_size); in pnv_tce() 581 unsigned long mask = (tbl->it_level_size - 1) << (level * shift); in pnv_tce() 596 int pnv_tce_build(struct iommu_table *tbl, long index, long npages, in pnv_tce_build() argument 601 u64 rpn = __pa(uaddr) >> tbl->it_page_shift; in pnv_tce_build() 609 ((rpn + i) << tbl->it_page_shift); in pnv_tce_build() 610 unsigned long idx = index - tbl->it_offset + i; in pnv_tce_build() 612 *(pnv_tce(tbl, idx)) = cpu_to_be64(newtce); in pnv_tce_build() [all …]
|
| D | pci-p5ioc2.c | 98 struct iommu_table *tbl = phb->p5ioc2.table_group.tables[0]; in pnv_pci_p5ioc2_dma_dev_setup() local 100 if (!tbl->it_map) { in pnv_pci_p5ioc2_dma_dev_setup() 101 tbl->it_ops = &pnv_p5ioc2_iommu_ops; in pnv_pci_p5ioc2_dma_dev_setup() 102 iommu_init_table(tbl, phb->hose->node); in pnv_pci_p5ioc2_dma_dev_setup() 105 INIT_LIST_HEAD_RCU(&tbl->it_group_list); in pnv_pci_p5ioc2_dma_dev_setup() 107 tbl, &phb->p5ioc2.table_group); in pnv_pci_p5ioc2_dma_dev_setup() 110 set_iommu_table_base(&pdev->dev, tbl); in pnv_pci_p5ioc2_dma_dev_setup() 131 struct iommu_table *tbl; in pnv_pci_init_p5ioc2_phb() local 206 tbl = phb->p5ioc2.table_group.tables[0] = &phb->p5ioc2.iommu_table; in pnv_pci_init_p5ioc2_phb() 208 table_group->tce32_start = tbl->it_offset << tbl->it_page_shift; in pnv_pci_init_p5ioc2_phb() [all …]
|
| D | pci-ioda.c | 57 static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl); 1267 struct iommu_table *tbl; in pnv_pci_ioda2_release_dma_pe() local 1270 tbl = pe->table_group.tables[0]; in pnv_pci_ioda2_release_dma_pe() 1280 pnv_pci_ioda2_table_free_pages(tbl); in pnv_pci_ioda2_release_dma_pe() 1281 iommu_free_table(tbl, of_node_full_name(dev->dev.of_node)); in pnv_pci_ioda2_release_dma_pe() 1640 static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl, in pnv_pci_ioda1_tce_invalidate() argument 1644 &tbl->it_group_list, struct iommu_table_group_link, in pnv_pci_ioda1_tce_invalidate() 1652 const unsigned shift = tbl->it_page_shift; in pnv_pci_ioda1_tce_invalidate() 1654 start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset); in pnv_pci_ioda1_tce_invalidate() 1655 end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset + in pnv_pci_ioda1_tce_invalidate() [all …]
|
| D | pci.h | 205 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages, 208 extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages); 209 extern int pnv_tce_xchg(struct iommu_table *tbl, long index, 211 extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index); 222 struct iommu_table *tbl, 224 extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl, 226 extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl, 232 extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
|
| /linux-4.4.14/drivers/hwmon/ |
| D | ab8500.c |
    60  const struct abx500_res_to_temp *tbl = cfg->temp_tbl;  in ab8500_voltage_to_temp() local
    66  if (r_ntc > tbl[0].resist || r_ntc < tbl[tbl_sz - 1].resist)  in ab8500_voltage_to_temp()
    69  while (!(r_ntc <= tbl[i].resist && r_ntc > tbl[i + 1].resist) &&  in ab8500_voltage_to_temp()
    74  *temp = tbl[i].temp * 1000 + ((tbl[i + 1].temp - tbl[i].temp) * 1000 *  in ab8500_voltage_to_temp()
    75  (r_ntc - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);  in ab8500_voltage_to_temp()
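
ab8500_voltage_to_temp() above walks a table sorted by falling resistance and linearly interpolates between the two bracketing rows. A minimal standalone version of that interpolation is below; the NTC table values are made up for the example, not the AB8500's calibration data:

#include <stdio.h>
#include <stdlib.h>

struct res_to_temp { int temp; int resist; };   /* degC, ohms; sorted by falling resistance */

static const struct res_to_temp tbl[] = {
	{ -10, 40000 }, { 0, 28000 }, { 10, 20000 }, { 25, 10000 }, { 40, 6000 },
};
#define TBL_SZ ((int)(sizeof(tbl) / sizeof(tbl[0])))

/* Return 0 and the temperature in milli-degC for a resistance inside the table range, else -1. */
static int res_to_mdegc(int r, int *out)
{
	if (r > tbl[0].resist || r < tbl[TBL_SZ - 1].resist)
		return -1;
	int i = 0;
	while (!(r <= tbl[i].resist && r > tbl[i + 1].resist) && i < TBL_SZ - 2)
		i++;
	*out = tbl[i].temp * 1000 + (tbl[i + 1].temp - tbl[i].temp) * 1000 *
	       (r - tbl[i].resist) / (tbl[i + 1].resist - tbl[i].resist);
	return 0;
}

int main(void)
{
	int mdeg;
	if (!res_to_mdegc(15000, &mdeg))
		printf("%d.%03d degC\n", mdeg / 1000, abs(mdeg % 1000));
	return 0;
}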
|
| /linux-4.4.14/arch/powerpc/kvm/ |
| D | book3s_64_vio_hv.c | 57 u64 *tbl; in kvmppc_h_put_tce() local 65 tbl = (u64 *)page_address(page); in kvmppc_h_put_tce() 69 tbl[idx % TCES_PER_PAGE] = tce; in kvmppc_h_put_tce() 89 u64 *tbl; in kvmppc_h_get_tce() local 95 tbl = (u64 *)page_address(page); in kvmppc_h_get_tce() 97 vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE]; in kvmppc_h_get_tce()
|
| /linux-4.4.14/arch/sparc/kernel/ |
| D | iommu.c | 51 struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl); in iommu_flushall() 104 iommu->tbl.table_map_base = dma_offset; in iommu_table_init() 110 iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node); in iommu_table_init() 111 if (!iommu->tbl.map) in iommu_table_init() 113 memset(iommu->tbl.map, 0, sz); in iommu_table_init() 115 iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT, in iommu_table_init() 150 kfree(iommu->tbl.map); in iommu_table_init() 151 iommu->tbl.map = NULL; in iommu_table_init() 162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in alloc_npages() 230 *dma_addrp = (iommu->tbl.table_map_base + in dma_4u_alloc_coherent() [all …]
|
| D | pci_sun4v.c | 159 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in dma_4v_alloc_coherent() 165 *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); in dma_4v_alloc_coherent() 190 iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); in dma_4v_alloc_coherent() 227 entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); in dma_4v_free_coherent() 229 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); in dma_4v_free_coherent() 256 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in dma_4v_map_page() 262 bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); in dma_4v_map_page() 291 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); in dma_4v_map_page() 318 entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; in dma_4v_unmap_page() 320 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); in dma_4v_unmap_page() [all …]
|
| D | head_32.S | 314 sll %g1, 0x8, %g1 ! make phys addr for l1 tbl 338 sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
|
| /linux-4.4.14/net/ipv4/ |
| D | sysctl_net_ipv4.c | 159 struct ctl_table tbl = { in proc_tcp_congestion_control() local 167 ret = proc_dostring(&tbl, write, buffer, lenp, ppos); in proc_tcp_congestion_control() 178 struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, }; in proc_tcp_available_congestion_control() local 181 tbl.data = kmalloc(tbl.maxlen, GFP_USER); in proc_tcp_available_congestion_control() 182 if (!tbl.data) in proc_tcp_available_congestion_control() 184 tcp_get_available_congestion_control(tbl.data, TCP_CA_BUF_MAX); in proc_tcp_available_congestion_control() 185 ret = proc_dostring(&tbl, write, buffer, lenp, ppos); in proc_tcp_available_congestion_control() 186 kfree(tbl.data); in proc_tcp_available_congestion_control() 195 struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX }; in proc_allowed_congestion_control() local 198 tbl.data = kmalloc(tbl.maxlen, GFP_USER); in proc_allowed_congestion_control() [all …]
|
| D | fib_rules.c | 78 struct fib_table *tbl; in fib4_rule_action() local 97 tbl = fib_get_table(rule->fr_net, rule->table); in fib4_rule_action() 98 if (tbl) in fib4_rule_action() 99 err = fib_table_lookup(tbl, &flp->u.ip4, in fib4_rule_action()
|
| D | udp_diag.c | 34 static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, in udp_dump_one() argument 47 req->id.idiag_if, tbl); in udp_dump_one() 55 req->id.idiag_if, tbl); in udp_dump_one()
|
| D | devinet.c | 2289 struct ctl_table *tbl = ctl_forward_entry; in devinet_init_net() local 2307 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL); in devinet_init_net() 2308 if (!tbl) in devinet_init_net() 2311 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1]; in devinet_init_net() 2312 tbl[0].extra1 = all; in devinet_init_net() 2313 tbl[0].extra2 = net; in devinet_init_net() 2327 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl); in devinet_init_net() 2343 if (tbl != ctl_forward_entry) in devinet_init_net() 2344 kfree(tbl); in devinet_init_net() 2359 struct ctl_table *tbl; in devinet_exit_net() local [all …]
|
| D | route.c | 2776 struct ctl_table *tbl; in sysctl_route_net_init() local 2778 tbl = ipv4_route_flush_table; in sysctl_route_net_init() 2780 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL); in sysctl_route_net_init() 2781 if (!tbl) in sysctl_route_net_init() 2786 tbl[0].procname = NULL; in sysctl_route_net_init() 2788 tbl[0].extra1 = net; in sysctl_route_net_init() 2790 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl); in sysctl_route_net_init() 2796 if (tbl != ipv4_route_flush_table) in sysctl_route_net_init() 2797 kfree(tbl); in sysctl_route_net_init() 2804 struct ctl_table *tbl; in sysctl_route_net_exit() local [all …]
|
| D | fib_semantics.c | 757 struct fib_table *tbl = NULL; in fib_check_nh() local 770 tbl = fib_get_table(net, cfg->fc_table); in fib_check_nh() 772 if (tbl) in fib_check_nh() 773 err = fib_table_lookup(tbl, &fl4, &res, in fib_check_nh() 781 if (!tbl || err) { in fib_check_nh()
|
| D | arp.c | 165 .tbl = &arp_tbl,
|
| /linux-4.4.14/drivers/clk/tegra/ |
| D | clk.c | 232 void __init tegra_init_from_table(struct tegra_clk_init_table *tbl, in tegra_init_from_table() argument 237 for (; tbl->clk_id < clk_max; tbl++) { in tegra_init_from_table() 238 clk = clks[tbl->clk_id]; in tegra_init_from_table() 241 __func__, PTR_ERR(clk), tbl->clk_id); in tegra_init_from_table() 247 if (tbl->parent_id < clk_max) { in tegra_init_from_table() 248 struct clk *parent = clks[tbl->parent_id]; in tegra_init_from_table() 257 if (tbl->rate) in tegra_init_from_table() 258 if (clk_set_rate(clk, tbl->rate)) { in tegra_init_from_table() 260 __func__, tbl->rate, in tegra_init_from_table() 265 if (tbl->state) in tegra_init_from_table()
|
| D | clk.h | 624 void tegra_init_from_table(struct tegra_clk_init_table *tbl,
|
| /linux-4.4.14/drivers/net/wireless/iwlwifi/mvm/ |
| D | rs.c | 586 struct iwl_scale_tbl_info *tbl) in rs_rate_scale_clear_tbl_windows() argument 592 rs_rate_scale_clear_window(&tbl->win[i]); in rs_rate_scale_clear_tbl_windows() 594 for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++) in rs_rate_scale_clear_tbl_windows() 595 rs_rate_scale_clear_window(&tbl->tpc_win[i]); in rs_rate_scale_clear_tbl_windows() 647 static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) in get_expected_tpt() argument 649 if (tbl->expected_tpt) in get_expected_tpt() 650 return tbl->expected_tpt[rs_index]; in get_expected_tpt() 662 struct iwl_scale_tbl_info *tbl, in _rs_collect_tx_data() argument 670 tpt = get_expected_tpt(tbl, scale_index); in _rs_collect_tx_data() 728 struct iwl_scale_tbl_info *tbl, in rs_collect_tx_data() argument [all …]
|
| /linux-4.4.14/drivers/media/usb/gspca/gl860/ |
| D | gl860.c | 592 int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len) in fetch_validx() argument 597 if (tbl[n].idx != 0xffff) in fetch_validx() 598 ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, in fetch_validx() 599 tbl[n].idx, 0, NULL); in fetch_validx() 600 else if (tbl[n].val == 0xffff) in fetch_validx() 603 msleep(tbl[n].val); in fetch_validx() 608 int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl, in keep_on_fetching_validx() argument 612 if (tbl[n].idx != 0xffff) in keep_on_fetching_validx() 613 ctrl_out(gspca_dev, 0x40, 1, tbl[n].val, tbl[n].idx, in keep_on_fetching_validx() 615 else if (tbl[n].val == 0xffff) in keep_on_fetching_validx() [all …]
|
| D | gl860.h | 91 int fetch_validx(struct gspca_dev *gspca_dev, struct validx *tbl, int len); 92 int keep_on_fetching_validx(struct gspca_dev *gspca_dev, struct validx *tbl, 94 void fetch_idxdata(struct gspca_dev *gspca_dev, struct idxdata *tbl, int len);
|
| D | gl860-ov9655.c | 216 u8 **tbl; in ov9655_init_post_alt() local 220 tbl = (reso == IMAGE_640) ? tbl_640 : tbl_1280; in ov9655_init_post_alt() 223 tbl_length[0], tbl[0]); in ov9655_init_post_alt() 226 tbl_length[i], tbl[i]); in ov9655_init_post_alt() 228 tbl_length[7], tbl[7]); in ov9655_init_post_alt()
|
| /linux-4.4.14/arch/x86/include/asm/ |
| D | tce.h |
    41  extern void tce_build(struct iommu_table *tbl, unsigned long index,
    43  extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
    45  extern void __init free_tce_table(void *tbl);
|
| D | calgary.h | 47 void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev); 48 void (*tce_cache_blast)(struct iommu_table *tbl); 49 void (*dump_error_regs)(struct iommu_table *tbl);
|
| /linux-4.4.14/arch/powerpc/platforms/pasemi/ |
| D | iommu.c | 88 static int iobmap_build(struct iommu_table *tbl, long index, in iobmap_build() argument 99 bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT; in iobmap_build() 101 ip = ((u32 *)tbl->it_base) + index; in iobmap_build() 117 static void iobmap_free(struct iommu_table *tbl, long index, in iobmap_free() argument 125 bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT; in iobmap_free() 127 ip = ((u32 *)tbl->it_base) + index; in iobmap_free()
|
| /linux-4.4.14/net/openvswitch/ |
| D | flow_table.c | 491 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, in ovs_flow_tbl_lookup_stats() argument 495 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); in ovs_flow_tbl_lookup_stats() 500 list_for_each_entry_rcu(mask, &tbl->mask_list, list) { in ovs_flow_tbl_lookup_stats() 509 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, in ovs_flow_tbl_lookup() argument 514 return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit); in ovs_flow_tbl_lookup() 517 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, in ovs_flow_tbl_lookup_exact() argument 520 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); in ovs_flow_tbl_lookup_exact() 525 list_for_each_entry(mask, &tbl->mask_list, list) { in ovs_flow_tbl_lookup_exact() 556 struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl, in ovs_flow_tbl_lookup_ufid() argument 559 struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti); in ovs_flow_tbl_lookup_ufid() [all …]
|
| D | flow_table.h | 81 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
|
| D | flow_netlink.c | 1137 const struct ovs_len_tbl *tbl) in nlattr_set() argument 1144 if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) { in nlattr_set() 1145 if (tbl[nla_type(nla)].next) in nlattr_set() 1146 tbl = tbl[nla_type(nla)].next; in nlattr_set() 1147 nlattr_set(nla, val, tbl); in nlattr_set()
|
| /linux-4.4.14/arch/blackfin/kernel/ |
| D | cplbinfo.c | 33 struct cplb_entry *tbl; member 55 addr = cdata->tbl[pos].addr; in cplbinfo_show() 56 data = cdata->tbl[pos].data; in cplbinfo_show() 73 cdata->tbl = icplb_tbl[cpu]; in cplbinfo_seq_init() 77 cdata->tbl = dcplb_tbl[cpu]; in cplbinfo_seq_init()
|
| /linux-4.4.14/arch/arm64/kernel/ |
| D | head.S | 257 .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 260 add \tmp2, \tbl, #PAGE_SIZE 262 str \tmp2, [\tbl, \tmp1, lsl #3] 263 add \tbl, \tbl, #PAGE_SIZE // next level table page 273 .macro create_pgd_entry, tbl, virt, tmp1, tmp2 274 create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2 276 create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2 279 create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2 290 .macro create_block_map, tbl, flags, phys, start, end 297 9999: str \phys, [\tbl, \start, lsl #3] // store the entry
|
| /linux-4.4.14/drivers/video/fbdev/riva/ |
| D | riva_hw.c |
    1344  #define LOAD_FIXED_STATE(tbl,dev) \
    1345  for (i = 0; i < sizeof(tbl##Table##dev)/8; i++) \
    1346  chip->dev[tbl##Table##dev[i][0]] = tbl##Table##dev[i][1]
    1347  #define LOAD_FIXED_STATE_8BPP(tbl,dev) \
    1348  for (i = 0; i < sizeof(tbl##Table##dev##_8BPP)/8; i++) \
    1349  chip->dev[tbl##Table##dev##_8BPP[i][0]] = tbl##Table##dev##_8BPP[i][1]
    1350  #define LOAD_FIXED_STATE_15BPP(tbl,dev) \
    1351  for (i = 0; i < sizeof(tbl##Table##dev##_15BPP)/8; i++) \
    1352  chip->dev[tbl##Table##dev##_15BPP[i][0]] = tbl##Table##dev##_15BPP[i][1]
    1353  #define LOAD_FIXED_STATE_16BPP(tbl,dev) \
    [all …]
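
LOAD_FIXED_STATE() above uses token pasting (tbl##Table##dev) to pick one of several statically named (index, value) tables and replay it into a register array. A compilable toy version of that paste-to-select pattern; like the original, the second macro argument names both the table suffix and the destination array:

#include <stddef.h>
#include <stdio.h>

/* Two (index, value) tables whose names follow the <name>Table<dev> convention. */
static const int fifoTableCRTC[][2] = { { 0, 0x11 }, { 2, 0x22 } };
static const int dacTableCRTC[][2]  = { { 1, 0x33 } };

#define LOAD_FIXED_STATE(tbl, dev)                                                          \
	for (size_t i = 0; i < sizeof(tbl##Table##dev) / sizeof(tbl##Table##dev[0]); i++)   \
		dev[tbl##Table##dev[i][0]] = tbl##Table##dev[i][1]

int main(void)
{
	int CRTC[4] = { 0 };
	LOAD_FIXED_STATE(fifo, CRTC);
	LOAD_FIXED_STATE(dac, CRTC);
	printf("%#x %#x %#x\n", CRTC[0], CRTC[1], CRTC[2]);
	return 0;
}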
|
| /linux-4.4.14/net/decnet/ |
| D | dn_rules.c | 72 struct dn_fib_table *tbl; in dn_fib_rule_action() local 92 tbl = dn_fib_get_table(rule->table, 0); in dn_fib_rule_action() 93 if (tbl == NULL) in dn_fib_rule_action() 96 err = tbl->lookup(tbl, fld, (struct dn_fib_res *)arg->result); in dn_fib_rule_action()
|
| D | dn_neigh.c | 87 .tbl = &dn_neigh_table,
|
| /linux-4.4.14/arch/powerpc/boot/ |
| D | cuboot-c2k.c | 36 struct mv64x60_cpu2pci_win *tbl; in c2k_bridge_setup() local 90 tbl = mv64x60_cpu2pci_io; in c2k_bridge_setup() 93 tbl = mv64x60_cpu2pci_mem; in c2k_bridge_setup() 112 pci_base_hi, pci_base_lo, cpu_base, size, tbl); in c2k_bridge_setup()
|
| /linux-4.4.14/arch/arm64/crypto/ |
| D | aes-neon.S | 53 tbl \in\().16b, {v16.16b-v19.16b}, \in\().16b 90 tbl \in\().16b, {\in\().16b}, v13.16b /* ShiftRows */ 120 tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b 121 tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b 136 tbl \in0\().16b, {v16.16b-v19.16b}, \in0\().16b 138 tbl \in1\().16b, {v16.16b-v19.16b}, \in1\().16b 140 tbl \in2\().16b, {v16.16b-v19.16b}, \in2\().16b 142 tbl \in3\().16b, {v16.16b-v19.16b}, \in3\().16b 238 tbl \in0\().16b, {\in0\().16b}, v13.16b /* ShiftRows */ 239 tbl \in1\().16b, {\in1\().16b}, v13.16b /* ShiftRows */ [all …]
|
| /linux-4.4.14/firmware/av7110/ |
| D | Boot.S | 46 .word tbl // table needed by firmware ROM 47 tbl: .word (endtbl - tbl) label
|
| /linux-4.4.14/fs/cifs/ |
| D | winucase.c |
    643  const wchar_t *tbl;  in cifs_toupper() local
    650  tbl = toplevel[idx];  in cifs_toupper()
    651  if (!tbl)  in cifs_toupper()
    658  out = tbl[idx];  in cifs_toupper()
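
cifs_toupper() above indexes a sparse top-level table with the high byte of the code point and the selected 256-entry leaf with the low byte, returning the input unchanged when either lookup misses. A small sketch of the same two-level table, populated with only an ASCII leaf:

#include <stdio.h>

typedef unsigned short wchar16;                  /* 16-bit code point for the example */

static wchar16 ascii_upper[256];                 /* leaf covering 0x0000-0x00ff */
static const wchar16 *toplevel[256];             /* sparse: unused slots stay NULL */

static void init_tables(void)
{
	for (int c = 'a'; c <= 'z'; c++)
		ascii_upper[c] = (wchar16)(c - 'a' + 'A');
	toplevel[0] = ascii_upper;               /* only the first 256 code points get a leaf */
}

static wchar16 my_toupper(wchar16 in)
{
	const wchar16 *tbl = toplevel[(in >> 8) & 0xff];
	if (!tbl)
		return in;                       /* no leaf for this range: unchanged */
	wchar16 out = tbl[in & 0xff];
	return out ? out : in;                   /* a zero entry also means "unchanged" */
}

int main(void)
{
	init_tables();
	printf("%c -> %c\n", 'q', (char)my_toupper('q'));
	return 0;
}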
|
| /linux-4.4.14/arch/powerpc/sysdev/ |
| D | dart_iommu.c | 154 static void dart_flush(struct iommu_table *tbl) in dart_flush() argument 163 static int dart_build(struct iommu_table *tbl, long index, in dart_build() argument 174 dp = ((unsigned int*)tbl->it_base) + index; in dart_build() 204 static void dart_free(struct iommu_table *tbl, long index, long npages) in dart_free() argument 215 dp = ((unsigned int *)tbl->it_base) + index; in dart_free()
|
| /linux-4.4.14/arch/arc/kernel/ |
| D | setup.c | 168 const struct cpuinfo_data *tbl; in arc_cpu_mumbojumbo() local 192 for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) { in arc_cpu_mumbojumbo() 193 if ((core->family >= tbl->info.id) && in arc_cpu_mumbojumbo() 194 (core->family <= tbl->up_range)) { in arc_cpu_mumbojumbo() 197 cpu_id, tbl->info.str, isa_nm, in arc_cpu_mumbojumbo() 203 if (tbl->info.id == 0) in arc_cpu_mumbojumbo()
|
| /linux-4.4.14/net/netlink/ |
| D | diag.c | 105 struct netlink_table *tbl = &nl_table[protocol]; in __netlink_diag_dump() local 106 struct rhashtable *ht = &tbl->hash; in __netlink_diag_dump() 107 const struct bucket_table *htbl = rht_dereference_rcu(ht->tbl, ht); in __netlink_diag_dump() 142 sk_for_each_bound(sk, &tbl->mc_list) { in __netlink_diag_dump()
|
| D | af_netlink.c | 1085 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; in netlink_update_listeners() local 1090 listeners = nl_deref_protected(tbl->listeners); in netlink_update_listeners() 1094 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { in netlink_update_listeners() 1096 sk_for_each_bound(sk, &tbl->mc_list) { in netlink_update_listeners() 2706 struct netlink_table *tbl = &nl_table[sk->sk_protocol]; in __netlink_change_ngroups() local 2711 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { in __netlink_change_ngroups() 2715 old = nl_deref_protected(tbl->listeners); in __netlink_change_ngroups() 2716 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); in __netlink_change_ngroups() 2717 rcu_assign_pointer(tbl->listeners, new); in __netlink_change_ngroups() 2721 tbl->groups = groups; in __netlink_change_ngroups() [all …]
|
| /linux-4.4.14/drivers/power/ |
| D | ab8500_btemp.c | 471 const struct abx500_res_to_temp *tbl, int tbl_size, int res) in ab8500_btemp_res_to_temp() argument 480 if (res > tbl[0].resist) in ab8500_btemp_res_to_temp() 482 else if (res <= tbl[tbl_size - 1].resist) in ab8500_btemp_res_to_temp() 486 while (!(res <= tbl[i].resist && in ab8500_btemp_res_to_temp() 487 res > tbl[i + 1].resist)) in ab8500_btemp_res_to_temp() 491 temp = tbl[i].temp + ((tbl[i + 1].temp - tbl[i].temp) * in ab8500_btemp_res_to_temp() 492 (res - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist); in ab8500_btemp_res_to_temp()
|
| D | ab8500_fg.c | 864 const struct abx500_v_to_cap *tbl; in ab8500_fg_volt_to_capacity() local 867 tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl, in ab8500_fg_volt_to_capacity() 871 if (voltage > tbl[i].voltage) in ab8500_fg_volt_to_capacity() 877 tbl[i].voltage, in ab8500_fg_volt_to_capacity() 878 tbl[i].capacity * 10, in ab8500_fg_volt_to_capacity() 879 tbl[i-1].voltage, in ab8500_fg_volt_to_capacity() 880 tbl[i-1].capacity * 10); in ab8500_fg_volt_to_capacity() 916 const struct batres_vs_temp *tbl; in ab8500_fg_battery_resistance() local 919 tbl = di->bm->bat_type[di->bm->batt_id].batres_tbl; in ab8500_fg_battery_resistance() 923 if (di->bat_temp / 10 > tbl[i].temp) in ab8500_fg_battery_resistance() [all …]
|
| D | bq25890_charger.c |
    278  const u32 *tbl;  member
    324  const u32 *tbl = bq25890_tables[id].lt.tbl;  in bq25890_find_idx() local
    327  for (idx = 1; idx < tbl_size && tbl[idx] <= value; idx++)  in bq25890_find_idx()
    350  return bq25890_tables[id].lt.tbl[idx];  in bq25890_find_val()
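
bq25890_find_idx() above maps a requested value to the largest table entry that does not exceed it, so the returned index can be written straight into the register field. A standalone sketch of that clamp-to-table lookup with an illustrative current table (the values are not the chip's):

#include <stdio.h>

/* Charge-current steps in uA, ascending; values are made up for the example. */
static const unsigned int ichg_tbl[] = {
	0, 64000, 128000, 256000, 512000, 1024000, 2048000,
};
#define TBL_SIZE (sizeof(ichg_tbl) / sizeof(ichg_tbl[0]))

/* Index of the largest entry <= value (index 0 if value is below the second entry). */
static unsigned int find_idx(const unsigned int *tbl, unsigned int tbl_size, unsigned int value)
{
	unsigned int idx;

	for (idx = 1; idx < tbl_size && tbl[idx] <= value; idx++)
		;
	return idx - 1;
}

int main(void)
{
	unsigned int idx = find_idx(ichg_tbl, TBL_SIZE, 300000);
	printf("idx=%u -> %u uA\n", idx, ichg_tbl[idx]);   /* idx=3 -> 256000 uA */
	return 0;
}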
|
| D | rt9455_charger.c | 263 static unsigned int rt9455_find_idx(const int tbl[], int tbl_size, int v) in rt9455_find_idx() argument 274 if (v <= tbl[i]) in rt9455_find_idx() 282 const int tbl[], int tbl_size, int *val) in rt9455_get_field_val() argument 292 *val = tbl[v]; in rt9455_get_field_val() 299 const int tbl[], int tbl_size, int val) in rt9455_set_field_val() argument 301 unsigned int idx = rt9455_find_idx(tbl, tbl_size, val); in rt9455_set_field_val()
|
| D | bq24190_charger.c | 213 static u8 bq24190_find_idx(const int tbl[], int tbl_size, int v) in bq24190_find_idx() argument 218 if (v < tbl[i]) in bq24190_find_idx() 278 const int tbl[], int tbl_size, in bq24190_get_field_val() argument 289 *val = tbl[v]; in bq24190_get_field_val() 296 const int tbl[], int tbl_size, in bq24190_set_field_val() argument 301 idx = bq24190_find_idx(tbl, tbl_size, val); in bq24190_set_field_val()
|
| D | smb347-charger.c | 206 static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val) in hw_to_current() argument 210 return tbl[val]; in hw_to_current() 214 static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val) in current_to_hw() argument 219 if (val < tbl[i]) in current_to_hw()
|
| /linux-4.4.14/net/netlabel/ |
| D | netlabel_domainhash.c | 48 struct list_head *tbl; member 148 bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt]; in netlbl_domhsh_search() 337 hsh_tbl->tbl = kcalloc(hsh_tbl->size, in netlbl_domhsh_init() 340 if (hsh_tbl->tbl == NULL) { in netlbl_domhsh_init() 345 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); in netlbl_domhsh_init() 397 &rcu_dereference(netlbl_domhsh)->tbl[bkt]); in netlbl_domhsh_add() 772 iter_list = &rcu_dereference(netlbl_domhsh)->tbl[iter_bkt]; in netlbl_domhsh_walk()
|
| D | netlabel_unlabeled.c | 77 struct list_head *tbl; member 229 bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]; in netlbl_unlhsh_search_iface() 351 &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt]); in netlbl_unlhsh_add_iface() 1204 iter_list = &rcu_dereference(netlbl_unlhsh)->tbl[iter_bkt]; in netlbl_unlabel_staticlist() 1430 hsh_tbl->tbl = kcalloc(hsh_tbl->size, in netlbl_unlabel_init() 1433 if (hsh_tbl->tbl == NULL) { in netlbl_unlabel_init() 1438 INIT_LIST_HEAD(&hsh_tbl->tbl[iter]); in netlbl_unlabel_init()
|
| /linux-4.4.14/drivers/sbus/char/ |
| D | envctrl.c | 328 int scale, char *tbl, char *bufdata) in envctrl_i2c_data_translate() argument 342 bufdata[0] = tbl[data]; in envctrl_i2c_data_translate() 347 sprintf(bufdata,"%d ", (tbl[data] * 10) / (scale)); in envctrl_i2c_data_translate() 368 char *tbl, j = -1; in envctrl_read_cpu_info() local 387 tbl = pchild->tables + pchild->tblprop_array[i].offset; in envctrl_read_cpu_info() 391 tbl, bufdata); in envctrl_read_cpu_info() 403 char *tbl = NULL; in envctrl_read_noncpu_info() local 418 tbl = pchild->tables + pchild->tblprop_array[i].offset; in envctrl_read_noncpu_info() 422 tbl, bufdata); in envctrl_read_noncpu_info()
|
| /linux-4.4.14/arch/arm/kernel/ |
| D | entry-common.S | 200 uaccess_disable tbl 202 adr tbl, sys_call_table @ load syscall table pointer 213 ldrne tbl, =sys_oabi_call_table 228 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine 268 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine 325 ldrlo pc, [tbl, scno, lsl #2]
|
| D | entry-header.S | 381 tbl .req r8 @ syscall table pointer label
|
| /linux-4.4.14/drivers/firmware/efi/ |
| D | efi.c | 433 efi_properties_table_t *tbl; in efi_config_parse_tables() local 435 tbl = early_memremap(efi.properties_table, sizeof(*tbl)); in efi_config_parse_tables() 436 if (tbl == NULL) { in efi_config_parse_tables() 441 if (tbl->memory_protection_attribute & in efi_config_parse_tables() 445 early_memunmap(tbl, sizeof(*tbl)); in efi_config_parse_tables()
|
| /linux-4.4.14/drivers/net/wireless/ath/wil6210/ |
| D | debugfs.c | 324 const struct dbg_off * const tbl) in wil6210_debugfs_init_offset() argument 328 for (i = 0; tbl[i].name; i++) { in wil6210_debugfs_init_offset() 331 switch (tbl[i].type) { in wil6210_debugfs_init_offset() 333 f = debugfs_create_u32(tbl[i].name, tbl[i].mode, dbg, in wil6210_debugfs_init_offset() 334 base + tbl[i].off); in wil6210_debugfs_init_offset() 337 f = debugfs_create_x32(tbl[i].name, tbl[i].mode, dbg, in wil6210_debugfs_init_offset() 338 base + tbl[i].off); in wil6210_debugfs_init_offset() 341 f = wil_debugfs_create_ulong(tbl[i].name, tbl[i].mode, in wil6210_debugfs_init_offset() 342 dbg, base + tbl[i].off); in wil6210_debugfs_init_offset() 345 f = wil_debugfs_create_iomem_x32(tbl[i].name, in wil6210_debugfs_init_offset() [all …]
|
| /linux-4.4.14/arch/x86/entry/syscalls/ |
| D | Makefile |
    8  syscall32 := $(srctree)/$(src)/syscall_32.tbl
    9  syscall64 := $(srctree)/$(src)/syscall_64.tbl
|
| /linux-4.4.14/drivers/mfd/ |
| D | rtsx_pcr.c | 563 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl) in rtsx_pci_set_pull_ctl() argument 567 while (*tbl & 0xFFFF0000) { in rtsx_pci_set_pull_ctl() 569 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl)); in rtsx_pci_set_pull_ctl() 570 tbl++; in rtsx_pci_set_pull_ctl() 578 const u32 *tbl; in rtsx_pci_card_pull_ctl_enable() local 581 tbl = pcr->sd_pull_ctl_enable_tbl; in rtsx_pci_card_pull_ctl_enable() 583 tbl = pcr->ms_pull_ctl_enable_tbl; in rtsx_pci_card_pull_ctl_enable() 587 return rtsx_pci_set_pull_ctl(pcr, tbl); in rtsx_pci_card_pull_ctl_enable() 593 const u32 *tbl; in rtsx_pci_card_pull_ctl_disable() local 596 tbl = pcr->sd_pull_ctl_disable_tbl; in rtsx_pci_card_pull_ctl_disable() [all …]
|
| D | menelaus.c | 493 static int menelaus_get_vtg_value(int vtg, const struct menelaus_vtg_value *tbl, in menelaus_get_vtg_value() argument 498 for (i = 0; i < n; i++, tbl++) in menelaus_get_vtg_value() 499 if (tbl->vtg == vtg) in menelaus_get_vtg_value() 500 return tbl->val; in menelaus_get_vtg_value()
|
| /linux-4.4.14/arch/powerpc/platforms/cell/ |
| D | iommu.c | 167 static int tce_build_cell(struct iommu_table *tbl, long index, long npages, in tce_build_cell() argument 174 container_of(tbl, struct iommu_window, table); in tce_build_cell() 199 io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); in tce_build_cell() 201 for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) in tce_build_cell() 213 static void tce_free_cell(struct iommu_table *tbl, long index, long npages) in tce_free_cell() argument 219 container_of(tbl, struct iommu_window, table); in tce_free_cell() 233 io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); in tce_free_cell()
|
| /linux-4.4.14/drivers/soc/qcom/ |
| D | smd.c | 1106 int tbl; in qcom_discover_channels() local 1110 for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) { in qcom_discover_channels() 1112 smem_items[tbl].alloc_tbl_id, NULL); in qcom_discover_channels() 1119 if (test_bit(i, edge->allocated[tbl])) in qcom_discover_channels() 1135 info_id = smem_items[tbl].info_base_id + cid; in qcom_discover_channels() 1136 fifo_id = smem_items[tbl].fifo_base_id + cid; in qcom_discover_channels() 1147 set_bit(i, edge->allocated[tbl]); in qcom_discover_channels()
|
| /linux-4.4.14/sound/pci/ |
| D | intel8x0m.c | 772 struct ich_pcm_table *tbl, *rec; in snd_intel8x0m_pcm() local 775 tbl = intel_pcms; in snd_intel8x0m_pcm() 780 tbl = nforce_pcms; in snd_intel8x0m_pcm() 784 tbl = ali_pcms; in snd_intel8x0m_pcm() 788 tbl = intel_pcms; in snd_intel8x0m_pcm() 795 rec = tbl + i; in snd_intel8x0m_pcm() 1124 struct ich_reg_info *tbl; in snd_intel8x0m_create() local 1186 tbl = intel_regs; in snd_intel8x0m_create() 1191 ichdev->reg_offset = tbl[i].offset; in snd_intel8x0m_create() 1192 ichdev->int_sta_mask = tbl[i].int_sta_mask; in snd_intel8x0m_create()
|
| D | intel8x0.c | 1668 struct ich_pcm_table *tbl, *rec; in snd_intel8x0_pcm() local 1672 tbl = intel_pcms; in snd_intel8x0_pcm() 1678 tbl = nforce_pcms; in snd_intel8x0_pcm() 1684 tbl = ali_pcms; in snd_intel8x0_pcm() 1688 tbl = intel_pcms; in snd_intel8x0_pcm() 1695 rec = tbl + i; in snd_intel8x0_pcm() 3046 struct ich_reg_info *tbl; in snd_intel8x0_create() local 3113 tbl = nforce_regs; in snd_intel8x0_create() 3116 tbl = ali_regs; in snd_intel8x0_create() 3119 tbl = intel_regs; in snd_intel8x0_create() [all …]
|
| /linux-4.4.14/drivers/net/bonding/ |
| D | bond_options.c | 506 const struct bond_opt_value *tbl; in bond_opt_parse() local 515 tbl = opt->values; in bond_opt_parse() 516 if (!tbl) in bond_opt_parse() 543 for (i = 0; tbl[i].string; i++) { in bond_opt_parse() 546 if (val->value == tbl[i].value) in bond_opt_parse() 547 ret = &tbl[i]; in bond_opt_parse() 550 (tbl[i].flags & BOND_VALFLAG_DEFAULT)) in bond_opt_parse() 551 ret = &tbl[i]; in bond_opt_parse() 553 if (!strcmp(valstr, tbl[i].string)) in bond_opt_parse() 554 ret = &tbl[i]; in bond_opt_parse()
|
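bond_opt_parse() above resolves a user-supplied option against a table of {string, value, flags} entries, matching either by numeric value or by string and remembering the entry flagged as the default. A condensed userspace sketch of that resolution order; the struct layout and flag name are illustrative assumptions, and the real function handles more cases:

    #include <stdio.h>
    #include <string.h>

    #define VALFLAG_DEFAULT 0x1

    struct opt_value { const char *string; int value; unsigned int flags; };

    /* Resolve by string (valstr != NULL) or by numeric value; remember the
     * entry flagged as default and fall back to it when nothing matches. */
    static const struct opt_value *opt_parse(const struct opt_value *tbl,
                                             const char *valstr, int value)
    {
        const struct opt_value *def = NULL;
        int i;

        for (i = 0; tbl[i].string; i++) {
            if (tbl[i].flags & VALFLAG_DEFAULT)
                def = &tbl[i];
            if (valstr ? !strcmp(valstr, tbl[i].string)
                       : value == tbl[i].value)
                return &tbl[i];
        }
        return def;
    }

    int main(void)
    {
        static const struct opt_value modes[] = {
            { "balance-rr",    0, VALFLAG_DEFAULT },
            { "active-backup", 1, 0 },
            { NULL,            0, 0 },
        };
        const struct opt_value *v = opt_parse(modes, "active-backup", -1);

        printf("%s = %d\n", v ? v->string : "none", v ? v->value : -1);
        return 0;
    }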
| /linux-4.4.14/sound/core/ |
| D | sgbuf.c | 31 #define sgbuf_align_table(tbl) ALIGN((tbl), SGBUF_TBL_ALIGN) argument
|
| /linux-4.4.14/sound/core/oss/ |
| D | mixer_oss.c | 1182 struct snd_mixer_oss_assign_table *tbl; in snd_mixer_oss_proc_write() local 1215 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); in snd_mixer_oss_proc_write() 1216 if (!tbl) in snd_mixer_oss_proc_write() 1218 tbl->oss_id = ch; in snd_mixer_oss_proc_write() 1219 tbl->name = kstrdup(str, GFP_KERNEL); in snd_mixer_oss_proc_write() 1220 if (! tbl->name) { in snd_mixer_oss_proc_write() 1221 kfree(tbl); in snd_mixer_oss_proc_write() 1224 tbl->index = idx; in snd_mixer_oss_proc_write() 1225 if (snd_mixer_oss_build_input(mixer, tbl, 1, 1) <= 0) { in snd_mixer_oss_proc_write() 1226 kfree(tbl->name); in snd_mixer_oss_proc_write() [all …]
|
| /linux-4.4.14/tools/perf/util/intel-pt-decoder/ |
| D | gen-insn-attr-x86.awk | 156 function print_table(tbl,name,fmt,n) 161 if (tbl[id]) 162 print " [" id "] = " tbl[id] ","
|
| /linux-4.4.14/arch/x86/tools/ |
| D | gen-insn-attr-x86.awk | 156 function print_table(tbl,name,fmt,n) 161 if (tbl[id]) 162 print " [" id "] = " tbl[id] ","
|
| /linux-4.4.14/drivers/iommu/ |
| D | amd_iommu_v2.c | 304 static void free_pasid_states_level1(struct pasid_state **tbl) in free_pasid_states_level1() argument 309 if (tbl[i] == NULL) in free_pasid_states_level1() 312 free_page((unsigned long)tbl[i]); in free_pasid_states_level1() 316 static void free_pasid_states_level2(struct pasid_state **tbl) in free_pasid_states_level2() argument 322 if (tbl[i] == NULL) in free_pasid_states_level2() 325 ptr = (struct pasid_state **)tbl[i]; in free_pasid_states_level2()
|
| D | amd_iommu.c | 1770 static void free_gcr3_tbl_level1(u64 *tbl) in free_gcr3_tbl_level1() argument 1776 if (!(tbl[i] & GCR3_VALID)) in free_gcr3_tbl_level1() 1779 ptr = __va(tbl[i] & PAGE_MASK); in free_gcr3_tbl_level1() 1785 static void free_gcr3_tbl_level2(u64 *tbl) in free_gcr3_tbl_level2() argument 1791 if (!(tbl[i] & GCR3_VALID)) in free_gcr3_tbl_level2() 1794 ptr = __va(tbl[i] & PAGE_MASK); in free_gcr3_tbl_level2()
|
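free_gcr3_tbl_level2() in amd_iommu.c above walks a page-sized array of entries, resolves each valid entry to a level-1 table and frees it before freeing the level-2 table itself. A userspace analogue of that two-level teardown, with ordinary heap allocations standing in for __va()/free_page() and an assumed VALID bit in the low bits of each entry:

    #include <stdint.h>
    #include <stdlib.h>

    #define TBL_ENTRIES 512
    #define TBL_VALID   0x1ULL

    static void free_tbl_level1(uint64_t *tbl)
    {
        free(tbl);                       /* free_page() in the driver */
    }

    /* Each valid level-2 entry holds a pointer to a level-1 table. */
    static void free_tbl_level2(uint64_t *tbl)
    {
        int i;

        for (i = 0; i < TBL_ENTRIES; i++) {
            if (!(tbl[i] & TBL_VALID))
                continue;
            free_tbl_level1((uint64_t *)(uintptr_t)(tbl[i] & ~TBL_VALID));
        }
        free(tbl);
    }

    int main(void)
    {
        uint64_t *l2 = calloc(TBL_ENTRIES, sizeof(*l2));
        uint64_t *l1 = calloc(TBL_ENTRIES, sizeof(*l1));

        if (!l2 || !l1)
            return 1;
        l2[3] = (uint64_t)(uintptr_t)l1 | TBL_VALID;  /* install one level-1 table */
        free_tbl_level2(l2);
        return 0;
    }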
| D | intel-iommu.c | 2891 struct context_entry **tbl, in copy_context_table() argument 2910 tbl[tbl_idx] = new_ce; in copy_context_table() 2980 tbl[tbl_idx + pos] = new_ce; in copy_context_table()
|
| /linux-4.4.14/net/ipv6/ |
| D | ip6_fib.c | 1931 struct fib6_table *tbl; member 1981 iter->w.root = &iter->tbl->tb6_root; in ipv6_route_seq_setup_walk() 1990 static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl, in ipv6_route_seq_next_table() argument 1996 if (tbl) { in ipv6_route_seq_next_table() 1997 h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1; in ipv6_route_seq_next_table() 1998 node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist)); in ipv6_route_seq_next_table() 2040 read_lock(&iter->tbl->tb6_lock); in ipv6_route_seq_next() 2042 read_unlock(&iter->tbl->tb6_lock); in ipv6_route_seq_next() 2053 iter->tbl = ipv6_route_seq_next_table(iter->tbl, net); in ipv6_route_seq_next() 2054 if (!iter->tbl) in ipv6_route_seq_next() [all …]
|
| D | ndisc.c | 131 .tbl = &nd_tbl,
|
| /linux-4.4.14/arch/sparc/include/asm/ |
| D | iommu_64.h | 28 struct iommu_map_table tbl; member
|
| /linux-4.4.14/scripts/ |
| D | checksyscalls.sh | 215 (ignore_list && syscall_list $(dirname $0)/../arch/x86/entry/syscalls/syscall_32.tbl) | \
|
| /linux-4.4.14/arch/parisc/include/asm/ |
| D | pdc.h | 298 int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl); 306 struct pdc_memory_table *tbl, unsigned long entries);
|
| /linux-4.4.14/arch/cris/include/arch-v32/arch/hwregs/ |
| D | eth_defs.h | 119 unsigned int tbl : 32; member 126 unsigned int tbl : 32; member
|
| /linux-4.4.14/arch/parisc/kernel/ |
| D | firmware.c | 873 int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl) in pdc_pci_irt() argument 878 BUG_ON((unsigned long)tbl & 0x7); in pdc_pci_irt() 883 __pa(pdc_result), hpa, __pa(tbl)); in pdc_pci_irt() 982 struct pdc_memory_table *tbl, unsigned long entries) in pdc_mem_mem_table() argument 991 memcpy(tbl, pdc_result2, entries * sizeof(*tbl)); in pdc_mem_mem_table()
|
| /linux-4.4.14/drivers/net/wireless/cw1200/ |
| D | txrx.h | 26 __le32 tbl[3]; member
|
| D | txrx.c | 187 policy->tbl[off] |= __cpu_to_le32(retries << shift); in tx_policy_build() 369 &arg.tbl[arg.num]; in tx_policy_upload() 376 memcpy(dst->rate_count_indices, src->tbl, in tx_policy_upload()
|
| D | wsm.h | 1559 struct wsm_tx_rate_retry_policy tbl[8]; member
|
| /linux-4.4.14/drivers/net/wireless/ath/ath6kl/ |
| D | debug.c | 1127 const struct wmi_target_roam_tbl *tbl; in ath6kl_debug_roam_tbl_event() local 1130 if (len < sizeof(*tbl)) in ath6kl_debug_roam_tbl_event() 1133 tbl = (const struct wmi_target_roam_tbl *) buf; in ath6kl_debug_roam_tbl_event() 1134 num_entries = le16_to_cpu(tbl->num_entries); in ath6kl_debug_roam_tbl_event() 1135 if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) > in ath6kl_debug_roam_tbl_event() 1164 struct wmi_target_roam_tbl *tbl; in ath6kl_roam_table_read() local 1191 tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl; in ath6kl_roam_table_read() 1192 num_entries = le16_to_cpu(tbl->num_entries); in ath6kl_roam_table_read() 1202 le16_to_cpu(tbl->roam_mode)); in ath6kl_roam_table_read() 1205 struct wmi_bss_roam_info *info = &tbl->info[i]; in ath6kl_roam_table_read()
|
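ath6kl_debug_roam_tbl_event() above first checks that the event buffer is at least as large as the fixed table header, then that the header plus num_entries variable-size records still fits in len before touching the entries. That guard against truncated buffers can be sketched on its own; the struct names and fields below are placeholders, and the driver's le16_to_cpu() byte-order handling is elided:

    #include <stdint.h>
    #include <string.h>

    struct roam_info { uint8_t bssid[6]; uint16_t rssi; };
    struct roam_tbl  { uint16_t num_entries; uint16_t roam_mode;
                       struct roam_info info[]; };

    /* Return 0 if the buffer holds a complete table, -1 otherwise. */
    static int validate_roam_tbl(const void *buf, size_t len)
    {
        const struct roam_tbl *tbl;
        uint16_t num_entries;

        if (len < sizeof(*tbl))                      /* header must fit */
            return -1;
        tbl = buf;
        memcpy(&num_entries, &tbl->num_entries, sizeof(num_entries));
        if (sizeof(*tbl) + (size_t)num_entries * sizeof(struct roam_info) > len)
            return -1;                               /* entries overrun buffer */
        return 0;
    }

    int main(void)
    {
        unsigned char ev[64] = { 2, 0 };   /* num_entries = 2, host byte order assumed */

        return validate_roam_tbl(ev, sizeof(ev)) ? 1 : 0;
    }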
| /linux-4.4.14/net/802/ |
| D | hippi.c | 154 if (p->tbl->family != AF_INET6) in hippi_neigh_setup_dev()
|
| /linux-4.4.14/net/tipc/ |
| D | socket.c | 2243 const struct bucket_table *tbl; in tipc_sk_reinit() local 2250 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); in tipc_sk_reinit() 2251 for (i = 0; i < tbl->size; i++) { in tipc_sk_reinit() 2252 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) { in tipc_sk_reinit() 2685 const struct bucket_table *tbl; in tipc_nl_sk_dump() local 2693 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); in tipc_nl_sk_dump() 2694 for (; tbl_id < tbl->size; tbl_id++) { in tipc_nl_sk_dump() 2695 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) { in tipc_nl_sk_dump()
|
| /linux-4.4.14/drivers/staging/media/davinci_vpfe/ |
| D | dm365_ipipe_hw.c | 776 struct vpfe_ipipe_3d_lut_entry *tbl; in ipipe_set_3d_lut_regs() local 793 tbl = lut_3d->table; in ipipe_set_3d_lut_regs() 797 val = tbl[i].b & D3_LUT_ENTRY_MASK; in ipipe_set_3d_lut_regs() 798 val |= (tbl[i].g & D3_LUT_ENTRY_MASK) << in ipipe_set_3d_lut_regs() 800 val |= (tbl[i].r & D3_LUT_ENTRY_MASK) << in ipipe_set_3d_lut_regs()
|
| /linux-4.4.14/sound/pci/ice1712/ |
| D | ice1724.c | 2310 struct snd_ice1712_card_info * const *tbl, *c; in snd_vt1724_read_eeprom() local 2339 for (tbl = card_tables; *tbl; tbl++) { in snd_vt1724_read_eeprom() 2340 for (c = *tbl; c->name; c++) { in snd_vt1724_read_eeprom() 2624 struct snd_ice1712_card_info * const *tbl, *c; in snd_vt1724_probe() local 2650 for (tbl = card_tables; *tbl; tbl++) { in snd_vt1724_probe() 2651 for (c = *tbl; c->name; c++) { in snd_vt1724_probe()
|
| D | ice1712.c | 2290 struct snd_ice1712_card_info * const *tbl, *c; in snd_ice1712_read_eeprom() local 2313 for (tbl = card_tables; *tbl; tbl++) { in snd_ice1712_read_eeprom() 2314 for (c = *tbl; c->subvendor; c++) { in snd_ice1712_read_eeprom() 2635 struct snd_ice1712_card_info * const *tbl, *c; in snd_ice1712_probe() local 2659 for (tbl = card_tables; *tbl; tbl++) { in snd_ice1712_probe() 2660 for (c = *tbl; c->subvendor; c++) { in snd_ice1712_probe()
|
| /linux-4.4.14/arch/unicore32/kernel/ |
| D | entry.S | 137 tbl .req r22 @ syscall table pointer label 618 ldw tbl, =sys_call_table @ load syscall table pointer 631 ldw pc, [tbl+], scno << #2 @ call sys_* routine 653 ldw pc, [tbl+], scno << #2 @ call sys_* routine
|
| /linux-4.4.14/tools/perf/ |
| D | builtin-kvm.c | 73 struct exit_reasons_table *tbl, in get_exit_reason() argument 76 while (tbl->reason != NULL) { in get_exit_reason() 77 if (tbl->exit_code == exit_code) in get_exit_reason() 78 return tbl->reason; in get_exit_reason() 79 tbl++; in get_exit_reason()
|
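get_exit_reason() in perf's builtin-kvm.c scans a table of {exit_code, reason} pairs terminated by a NULL reason string. Since perf is userspace code the pattern translates almost directly; the field types are inferred from the excerpt and the table contents are made-up examples, not the real VMX/SVM exit lists:

    #include <stdio.h>

    struct exit_reasons_table { unsigned long exit_code; const char *reason; };

    static const char *get_exit_reason(const struct exit_reasons_table *tbl,
                                       unsigned long exit_code)
    {
        while (tbl->reason != NULL) {
            if (tbl->exit_code == exit_code)
                return tbl->reason;
            tbl++;
        }
        return "UNKNOWN";    /* fallback when the code is not in the table */
    }

    int main(void)
    {
        static const struct exit_reasons_table demo[] = {
            { 0,  "EXCEPTION_NMI" },
            { 12, "HLT" },
            { 0,  NULL },
        };

        printf("%s\n", get_exit_reason(demo, 12));
        return 0;
    }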
| /linux-4.4.14/drivers/net/ethernet/ibm/ |
| D | ibmveth.c | 1411 struct iommu_table *tbl; in ibmveth_get_desired_dma() local 1416 tbl = get_iommu_table_base(&vdev->dev); in ibmveth_get_desired_dma() 1420 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl); in ibmveth_get_desired_dma() 1425 ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl); in ibmveth_get_desired_dma() 1433 buff_size, tbl); in ibmveth_get_desired_dma() 1438 rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl); in ibmveth_get_desired_dma()
|
| /linux-4.4.14/Documentation/ |
| D | adding-syscalls.txt | 247 arch/x86/entry/syscalls/syscall_64.tbl: 251 and an "i386" entry in arch/x86/entry/syscalls/syscall_32.tbl: 350 First, the entry in arch/x86/entry/syscalls/syscall_32.tbl gets an extra 362 arch/x86/entry/syscalls/syscall_64.tbl is split so that x32 programs hit the 371 arch/x86/entry/syscalls/syscall_64.tbl is unchanged). 400 (arch/x86/entry/syscalls/syscall_64.tbl) is adjusted to match: 407 arch/x86/entry/syscalls/syscall_32.tbl:
|
| /linux-4.4.14/arch/mips/include/asm/txx9/ |
| D | tx3927.h | 112 volatile unsigned long tbl; /* +d0 */ member
|
| /linux-4.4.14/drivers/char/tpm/ |
| D | tpm_tis.c | 124 struct acpi_table_tpm2 *tbl; in is_fifo() local 132 (struct acpi_table_header **) &tbl); in is_fifo() 138 if (le32_to_cpu(tbl->start_method) != TPM2_START_FIFO) in is_fifo()
|
| /linux-4.4.14/fs/gfs2/ |
| D | glock.c | 1421 const struct bucket_table *tbl; in glock_hash_walk() local 1425 tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table); in glock_hash_walk() 1426 for (i = 0; i < tbl->size; i++) { in glock_hash_walk() 1427 rht_for_each_entry_safe(gl, pos, next, tbl, i, gl_node) { in glock_hash_walk()
|
| /linux-4.4.14/fs/nfs/filelayout/ |
| D | filelayout.c | 132 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; in filelayout_async_handle_error() local 191 rpc_wake_up(&tbl->slot_tbl_waitq); in filelayout_async_handle_error() 205 rpc_wake_up(&tbl->slot_tbl_waitq); in filelayout_async_handle_error()
|
| /linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/ |
| D | l2t.c | 371 int addr_len = neigh->tbl->key_len; in cxgb4_l2t_get() 484 int addr_len = neigh->tbl->key_len; in t4_l2t_update()
|
| /linux-4.4.14/net/bridge/ |
| D | br_vlan.c | 29 static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid) in br_vlan_lookup() argument 31 return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params); in br_vlan_lookup()
|
| /linux-4.4.14/sound/pci/ac97/ |
| D | ac97_codec.c | 1103 const struct snd_ac97_res_table *tbl; in check_volume_resolution() local 1104 for (tbl = ac97->res_table; tbl->reg; tbl++) { in check_volume_resolution() 1105 if (tbl->reg == reg) { in check_volume_resolution() 1106 *lo_max = tbl->bits & 0xff; in check_volume_resolution() 1107 *hi_max = (tbl->bits >> 8) & 0xff; in check_volume_resolution()
|
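check_volume_resolution() above looks up an AC'97 register in a zero-terminated res_table and unpacks the low- and high-channel bit widths from a single packed `bits` field (low byte and next byte). A small sketch of that packed lookup; the register number and widths in the example table are invented:

    #include <stdio.h>

    struct res_table { unsigned short reg; unsigned short bits; };

    /* Find the per-register volume resolution: low-channel width in the low
     * byte of ->bits, high-channel width in the next byte. Returns 1 when
     * the register was found in the table. */
    static int lookup_resolution(const struct res_table *tbl, unsigned short reg,
                                 int *lo_max, int *hi_max)
    {
        for (; tbl->reg; tbl++) {
            if (tbl->reg == reg) {
                *lo_max = tbl->bits & 0xff;
                *hi_max = (tbl->bits >> 8) & 0xff;
                return 1;
            }
        }
        return 0;
    }

    int main(void)
    {
        static const struct res_table tbl[] = {
            { 0x02, (5 << 8) | 5 },   /* example: 5 bits per channel */
            { 0x00, 0 },
        };
        int lo, hi;

        if (lookup_resolution(tbl, 0x02, &lo, &hi))
            printf("lo=%d hi=%d\n", lo, hi);
        return 0;
    }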
| /linux-4.4.14/kernel/ |
| D | workqueue.c | 5222 cpumask_var_t *tbl; in wq_numa_init() local 5241 tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL); in wq_numa_init() 5242 BUG_ON(!tbl); in wq_numa_init() 5245 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL, in wq_numa_init() 5255 cpumask_set_cpu(cpu, tbl[node]); in wq_numa_init() 5258 wq_numa_possible_cpumask = tbl; in wq_numa_init()
|
| /linux-4.4.14/drivers/acpi/ |
| D | nfit.c | 1736 struct acpi_table_header *tbl; in acpi_nfit_add() local 1741 status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz); in acpi_nfit_add() 1759 acpi_desc->acpi_header = *tbl; in acpi_nfit_add() 1760 acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit); in acpi_nfit_add()
|
| /linux-4.4.14/drivers/media/platform/s5p-jpeg/ |
| D | jpeg-core.c | 703 const unsigned char *tbl, in exynos4_jpeg_set_tbl() argument 710 dword = tbl[i] | in exynos4_jpeg_set_tbl() 711 (tbl[i + 1] << 8) | in exynos4_jpeg_set_tbl() 712 (tbl[i + 2] << 16) | in exynos4_jpeg_set_tbl() 713 (tbl[i + 3] << 24); in exynos4_jpeg_set_tbl()
|
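exynos4_jpeg_set_tbl() above assembles each 32-bit register word from four consecutive table bytes, least-significant byte first, before writing it to hardware. The packing step in isolation, with the register write replaced by printing the word and a JPEG-style example table:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a byte table into little-endian 32-bit words, four bytes per
     * word, as done before each register write in exynos4_jpeg_set_tbl(). */
    static void pack_tbl(const unsigned char *tbl, size_t len)
    {
        size_t i;

        for (i = 0; i + 3 < len; i += 4) {
            uint32_t dword = tbl[i] |
                             (tbl[i + 1] << 8) |
                             (tbl[i + 2] << 16) |
                             ((uint32_t)tbl[i + 3] << 24);

            printf("word %zu: 0x%08x\n", i / 4, (unsigned)dword);
        }
    }

    int main(void)
    {
        const unsigned char qtbl[8] = { 16, 11, 10, 16, 24, 40, 51, 61 };

        pack_tbl(qtbl, sizeof(qtbl));
        return 0;
    }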
| /linux-4.4.14/net/sched/ |
| D | sch_teql.c | 233 mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev); in __teql_resolve()
|
| /linux-4.4.14/drivers/net/ethernet/qlogic/qed/ |
| D | qed_main.c | 360 struct msix_entry *tbl; in qed_set_int_mode() local 367 int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); in qed_set_int_mode()
|
| /linux-4.4.14/fs/nfs/flexfilelayout/ |
| D | flexfilelayout.c | 974 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; in ff_layout_async_handle_error_v4() local 1036 rpc_wake_up(&tbl->slot_tbl_waitq); in ff_layout_async_handle_error_v4() 1049 rpc_wake_up(&tbl->slot_tbl_waitq); in ff_layout_async_handle_error_v4()
|
| /linux-4.4.14/net/atm/ |
| D | lec.c | 834 static void *lec_tbl_walk(struct lec_state *state, struct hlist_head *tbl, in lec_tbl_walk() argument 840 e = tbl->first; in lec_tbl_walk() 842 e = tbl->first; in lec_tbl_walk()
|
| D | clip.c | 293 if (neigh->tbl->family != AF_INET) in clip_constructor()
|
| /linux-4.4.14/drivers/media/platform/ |
| D | rcar_jpu.c | 502 static void jpu_set_tbl(struct jpu *jpu, u32 reg, const unsigned int *tbl, in jpu_set_tbl() argument 507 jpu_write(jpu, tbl[i], reg + (i << 2)); in jpu_set_tbl()
|
| /linux-4.4.14/drivers/net/ethernet/broadcom/bnx2x/ |
| D | bnx2x_main.c | 14805 struct bdn_fc_npiv_tbl *tbl = NULL; in bnx2x_get_fc_npiv() local 14815 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); in bnx2x_get_fc_npiv() 14816 if (!tbl) { in bnx2x_get_fc_npiv() 14825 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) { in bnx2x_get_fc_npiv() 14833 entries = tbl->fc_npiv_cfg.num_of_npiv; in bnx2x_get_fc_npiv() 14835 tbl->fc_npiv_cfg.num_of_npiv = entries; in bnx2x_get_fc_npiv() 14837 if (!tbl->fc_npiv_cfg.num_of_npiv) { in bnx2x_get_fc_npiv() 14841 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) { in bnx2x_get_fc_npiv() 14843 tbl->fc_npiv_cfg.num_of_npiv); in bnx2x_get_fc_npiv() 14847 tbl->fc_npiv_cfg.num_of_npiv); in bnx2x_get_fc_npiv() [all …]
|
| D | bnx2x_cmn.c | 4736 struct msix_entry *tbl; in bnx2x_alloc_mem_bp() local 4792 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL); in bnx2x_alloc_mem_bp() 4793 if (!tbl) in bnx2x_alloc_mem_bp() 4795 bp->msix_table = tbl; in bnx2x_alloc_mem_bp()
|
| /linux-4.4.14/sound/pci/hda/ |
| D | patch_cirrus.c | 1032 struct hda_jack_callback *tbl) in cs4210_spdif_automute() argument
|
| D | patch_ca0132.c | 4430 struct hda_jack_tbl *tbl; in hp_callback() local 4437 tbl = snd_hda_jack_tbl_get(codec, cb->nid); in hp_callback() 4438 if (tbl) in hp_callback() 4439 tbl->block_report = 1; in hp_callback()
|
| /linux-4.4.14/Documentation/DocBook/ |
| D | kernel-api.xml.db | 712 API-disk-replace-part-tbl 713 API-disk-expand-part-tbl
|
| /linux-4.4.14/drivers/net/wireless/brcm80211/brcmsmac/phy/ |
| D | phy_n.c | 14164 struct phytbl_info tbl; in wlc_phy_table_write_nphy() local 14166 tbl.tbl_id = id; in wlc_phy_table_write_nphy() 14167 tbl.tbl_len = len; in wlc_phy_table_write_nphy() 14168 tbl.tbl_offset = offset; in wlc_phy_table_write_nphy() 14169 tbl.tbl_width = width; in wlc_phy_table_write_nphy() 14170 tbl.tbl_ptr = data; in wlc_phy_table_write_nphy() 14171 wlc_phy_write_table_nphy(pi, &tbl); in wlc_phy_table_write_nphy() 14178 struct phytbl_info tbl; in wlc_phy_table_read_nphy() local 14180 tbl.tbl_id = id; in wlc_phy_table_read_nphy() 14181 tbl.tbl_len = len; in wlc_phy_table_read_nphy() [all …]
|
| /linux-4.4.14/fs/nfsd/ |
| D | nfs4state.c | 2124 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) in find_client_in_id_table() argument 2129 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { in find_client_in_id_table() 2143 struct list_head *tbl = nn->conf_id_hashtbl; in find_confirmed_client() local 2146 return find_client_in_id_table(tbl, clid, sessions); in find_confirmed_client() 2152 struct list_head *tbl = nn->unconf_id_hashtbl; in find_unconfirmed_client() local 2155 return find_client_in_id_table(tbl, clid, sessions); in find_unconfirmed_client()
|
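find_client_in_id_table() above hashes the clientid, indexes into an array of list heads and walks only that bucket comparing the stored id. A userspace analogue with a plain array of singly linked buckets; the hash function and struct layout are simplified stand-ins for the kernel's hlist machinery:

    #include <stdio.h>

    #define ID_HASH_SIZE 64

    struct client {
        unsigned int id;
        struct client *next;              /* bucket chain */
    };

    static struct client *id_tbl[ID_HASH_SIZE];

    static unsigned int id_hash(unsigned int id)
    {
        return id & (ID_HASH_SIZE - 1);   /* simplified stand-in hash */
    }

    /* Walk only the bucket the id hashes to, as in find_client_in_id_table(). */
    static struct client *find_client(struct client **tbl, unsigned int id)
    {
        struct client *clp;

        for (clp = tbl[id_hash(id)]; clp; clp = clp->next)
            if (clp->id == id)
                return clp;
        return NULL;
    }

    int main(void)
    {
        struct client c = { .id = 42, .next = NULL };

        id_tbl[id_hash(c.id)] = &c;
        printf("%s\n", find_client(id_tbl, 42) ? "found" : "missing");
        return 0;
    }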
| /linux-4.4.14/drivers/s390/net/ |
| D | qeth_l3_main.c | 3129 if (np->tbl->family == AF_INET) in qeth_l3_neigh_setup()
|
| /linux-4.4.14/drivers/net/ethernet/sun/ |
| D | niu.c | 4733 struct rdc_table *tbl = &tp->tables[i]; in niu_init_rdc_groups() local 4739 tbl->rxdma_channel[slot]); in niu_init_rdc_groups()
|
| /linux-4.4.14/drivers/net/ethernet/rocker/ |
| D | rocker.c | 5448 if (n->tbl != &arp_tbl) in rocker_netevent_event()
|