Identifier cross-reference: txp (linux-4.4.14)

/linux-4.4.14/drivers/net/

ifb.c

    ifb_ri_tasklet():
         67  struct ifb_q_private *txp = (struct ifb_q_private *)_txp;   (local)
         71  txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
         72  skb = skb_peek(&txp->tq);
         76  skb_queue_splice_tail_init(&txp->rq, &txp->tq);
         80  while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
         86  u64_stats_update_begin(&txp->tsync);
         87  txp->tx_packets++;
         88  txp->tx_bytes += skb->len;
         89  u64_stats_update_end(&txp->tsync);
         92  skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
         96  txp->dev->stats.tx_dropped++;
         97  if (skb_queue_len(&txp->tq) != 0)
        102  skb->skb_iif = txp->dev->ifindex;
        114  skb = skb_peek(&txp->rq);
        116  txp->tasklet_pending = 0;
        126  txp->tasklet_pending = 1;
        127  tasklet_schedule(&txp->ifb_tasklet);

    ifb_stats64():
        136  struct ifb_q_private *txp = dp->tx_private;   (local)
        141  for (i = 0; i < dev->num_tx_queues; i++, txp++) {
        143  start = u64_stats_fetch_begin_irq(&txp->rsync);
        144  packets = txp->rx_packets;
        145  bytes = txp->rx_bytes;
        146  } while (u64_stats_fetch_retry_irq(&txp->rsync, start));
        151  start = u64_stats_fetch_begin_irq(&txp->tsync);
        152  packets = txp->tx_packets;
        153  bytes = txp->tx_bytes;
        154  } while (u64_stats_fetch_retry_irq(&txp->tsync, start));

    ifb_dev_init():
        167  struct ifb_q_private *txp;   (local)
        170  txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
        171  if (!txp)
        173  dp->tx_private = txp;
        174  for (i = 0; i < dev->num_tx_queues; i++, txp++) {
        175  txp->txqnum = i;
        176  txp->dev = dev;
        177  __skb_queue_head_init(&txp->rq);
        178  __skb_queue_head_init(&txp->tq);
        179  u64_stats_init(&txp->rsync);
        180  u64_stats_init(&txp->tsync);
        181  tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
        182               (unsigned long)txp);

    ifb_dev_free():
        205  struct ifb_q_private *txp = dp->tx_private;   (local)
        208  for (i = 0; i < dev->num_tx_queues; i++, txp++) {
        209  tasklet_kill(&txp->ifb_tasklet);
        210  __skb_queue_purge(&txp->rq);
        211  __skb_queue_purge(&txp->tq);

    ifb_xmit():
        242  struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);   (local)
        244  u64_stats_update_begin(&txp->rsync);
        245  txp->rx_packets++;
        246  txp->rx_bytes += skb->len;
        247  u64_stats_update_end(&txp->rsync);
        255  if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
        256  netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
        258  __skb_queue_tail(&txp->rq, skb);
        259  if (!txp->tasklet_pending) {
        260  txp->tasklet_pending = 1;
        261  tasklet_schedule(&txp->ifb_tasklet);

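The ifb hits show the kernel's u64_stats_sync pattern: the writer brackets counter updates with update_begin/update_end, and a reader of 64-bit counters retries until it observes a stable sequence number. A minimal userspace sketch of the same retry-loop idea, using C11 atomics instead of the kernel API (struct and function names here are illustrative, not kernel symbols):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative analogue of u64_stats_sync: an even sequence number
     * means "stable"; the writer makes it odd while updating. */
    struct q_stats {
            atomic_uint seq;
            uint64_t packets;
            uint64_t bytes;
    };

    static void stats_add(struct q_stats *s, uint64_t len)
    {
            atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* begin */
            s->packets++;
            s->bytes += len;
            atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* end */
    }

    static void stats_read(struct q_stats *s, uint64_t *packets, uint64_t *bytes)
    {
            unsigned int start;

            do {    /* retry while a writer is mid-update or raced with us */
                    do {
                            start = atomic_load_explicit(&s->seq, memory_order_acquire);
                    } while (start & 1);
                    *packets = s->packets;
                    *bytes = s->bytes;
            } while (atomic_load_explicit(&s->seq, memory_order_acquire) != start);
    }

    int main(void)
    {
            struct q_stats s = { 0 };
            uint64_t p, b;

            stats_add(&s, 1500);
            stats_read(&s, &p, &b);
            printf("%llu packets, %llu bytes\n",
                   (unsigned long long)p, (unsigned long long)b);
            return 0;
    }
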
/linux-4.4.14/drivers/spi/

spi-oc-tiny.c

         54  const u8 *txp;   (member of struct tiny_spi)

    tiny_spi_txrx_bufs():
        126  const u8 *txp = t->tx_buf;   (local)
        133  hw->txp = t->tx_buf;
        140  writeb(hw->txp ? *hw->txp++ : 0,
        143  writeb(hw->txp ? *hw->txp++ : 0,
        148  writeb(hw->txp ? *hw->txp++ : 0,
        157  writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
        159  writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);

    tiny_spi_irq():
        189  writeb(hw->txp ? *hw->txp++ : 0,

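The recurring `txp ? *txp++ : 0` test implements the SPI convention that a transfer with a NULL tx_buf is receive-only: the controller still has to clock something out on MOSI, so the driver feeds zeros. A small sketch of that fill rule in generic C, with no real register I/O (`fifo_push` is an invented stand-in for the writeb() to the TX data register):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for writeb() to a TX-data register. */
    static void fifo_push(uint8_t byte)
    {
            printf("clocked out: 0x%02x\n", byte);
    }

    /* Feed 'len' bytes to the bus; a NULL tx buffer means rx-only,
     * so clock out zero padding instead, as spi-oc-tiny does. */
    static void spi_feed_tx(const uint8_t *txp, size_t len)
    {
            while (len--)
                    fifo_push(txp ? *txp++ : 0);
    }

    int main(void)
    {
            const uint8_t cmd[] = { 0x9f, 0x00 };

            spi_feed_tx(cmd, sizeof(cmd)); /* normal write */
            spi_feed_tx(NULL, 2);          /* rx-only: zeros on MOSI */
            return 0;
    }
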
spi-falcon.c

    falcon_sflash_xfer():
        104  const u8 *txp = t->tx_buf;   (local)
        124  if (!txp) {
        140  priv->sfcmd |= *txp;
        141  txp++;
        156  if (txp && bytelen) {
        175  /* txp is valid, already checked */
        181  val = (val << 8) | (*txp++);
        183  } else if ((dumlen < 15) && (*txp == 0)) {
        189  txp++;
        222  /* txp still valid */
        228  val |= (*txp++) << (8 * len++);

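Lines 181 and 228 both pack successive TX bytes into a 32-bit word, but in opposite directions: the first is MSB-first (shift left, OR in the new low byte), the second LSB-first (OR each byte into the next-higher lane). A standalone comparison of the two, assuming nothing beyond standard C:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack up to four bytes MSB-first, as in line 181 of spi-falcon.c. */
    static uint32_t pack_be(const uint8_t *txp, int n)
    {
            uint32_t val = 0;

            while (n--)
                    val = (val << 8) | *txp++;
            return val;
    }

    /* Pack LSB-first, shifting each byte into place, as in line 228. */
    static uint32_t pack_le(const uint8_t *txp, int n)
    {
            uint32_t val = 0;
            int len = 0;

            while (n--)
                    val |= (uint32_t)(*txp++) << (8 * len++);
            return val;
    }

    int main(void)
    {
            const uint8_t buf[4] = { 0x11, 0x22, 0x33, 0x44 };

            printf("MSB-first: 0x%08x\n", pack_be(buf, 4)); /* 0x11223344 */
            printf("LSB-first: 0x%08x\n", pack_le(buf, 4)); /* 0x44332211 */
            return 0;
    }
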
spi-ti-qspi.c

    qspi_write_msg():
        250  u32 *txp = (u32 *)txbuf;   (local)
        252  data = cpu_to_be32(*txp++);
        255  data = cpu_to_be32(*txp++);
        258  data = cpu_to_be32(*txp++);
        261  data = cpu_to_be32(*txp++);

/linux-4.4.14/drivers/net/wireless/iwlwifi/

iwl-eeprom-parse.c

    iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
                               struct iwl_eeprom_enhanced_txpwr *txp):   (argument, line 354)
        360  if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result)
        361  result = txp->chain_a_max;
        363  if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result)
        364  result = txp->chain_b_max;
        366  if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result)
        367  result = txp->chain_c_max;
        371  data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result)
        372  result = txp->mimo2_max;
        374  if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result)
        375  result = txp->mimo3_max;

        385  ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")   (macro body)

    iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
                                    struct iwl_eeprom_enhanced_txpwr *txp,
                                    int n_channels, s8 max_txpower_avg):   (argument, line 388)
        395  band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
        402  if (txp->channel != 0 && chan->hw_value != txp->channel)
        410  !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ))

    iwl_eeprom_enhanced_txpower():
        420  struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;   (local)
        436  txp = &txp_array[idx];
        438  if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
        442  (txp->channel && (txp->flags &
        444  "Common " : (txp->channel) ?
        446  (txp->channel),
        455  txp->flags);
        458  txp->chain_a_max, txp->chain_b_max,
        459  txp->chain_c_max);
        462  txp->mimo2_max, txp->mimo3_max,
        463  ((txp->delta_20_in_40 & 0xf0) >> 4),
        464  (txp->delta_20_in_40 & 0x0f));
        466  max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);
        468  iwl_eeprom_enh_txp_read_element(data, txp, n_channels,

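iwl_get_max_txpwr_half_dbm() reduces a per-chain power record to a single maximum, but only over the antenna chains the hardware reports as valid. A compact sketch of that reduction with made-up flag and struct names (the real IWL types and ANT_* masks live in the iwlwifi headers):

    #include <stdint.h>
    #include <stdio.h>

    #define ANT_A (1 << 0)
    #define ANT_B (1 << 1)
    #define ANT_C (1 << 2)

    /* Illustrative record: per-chain maxima in half-dBm steps. */
    struct txpwr_rec {
            int8_t chain_a_max, chain_b_max, chain_c_max;
    };

    static int8_t max_txpwr_half_dbm(uint8_t valid_tx_ant,
                                     const struct txpwr_rec *txp)
    {
            int8_t result = 0;

            /* Only chains present in the valid-antenna mask may contribute. */
            if ((valid_tx_ant & ANT_A) && txp->chain_a_max > result)
                    result = txp->chain_a_max;
            if ((valid_tx_ant & ANT_B) && txp->chain_b_max > result)
                    result = txp->chain_b_max;
            if ((valid_tx_ant & ANT_C) && txp->chain_c_max > result)
                    result = txp->chain_c_max;
            return result;
    }

    int main(void)
    {
            struct txpwr_rec rec = { .chain_a_max = 30, .chain_b_max = 34,
                                     .chain_c_max = 40 };

            /* Chain C is ignored because only A and B are valid. */
            printf("%d half-dBm\n", max_txpwr_half_dbm(ANT_A | ANT_B, &rec));
            return 0;
    }
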
/linux-4.4.14/drivers/rtc/

rtc-rs5c348.c

    rs5c348_rtc_set_time():
         68  u8 txbuf[5+7], *txp;   (local)
         72  txp = txbuf;
         78  txp = &txbuf[5];
         79  txp[RS5C348_REG_SECS] = bin2bcd(tm->tm_sec);
         80  txp[RS5C348_REG_MINS] = bin2bcd(tm->tm_min);
         82  txp[RS5C348_REG_HOURS] = bin2bcd(tm->tm_hour);
         85  txp[RS5C348_REG_HOURS] = bin2bcd((tm->tm_hour + 11) % 12 + 1) |
         88  txp[RS5C348_REG_WDAY] = bin2bcd(tm->tm_wday);
         89  txp[RS5C348_REG_DAY] = bin2bcd(tm->tm_mday);
         90  txp[RS5C348_REG_MONTH] = bin2bcd(tm->tm_mon + 1) |
         92  txp[RS5C348_REG_YEAR] = bin2bcd(tm->tm_year % 100);

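Two details in the rs5c348 hits are worth unpacking: BCD encoding (most RTC chips store each decimal digit in a nibble) and the `(hour + 11) % 12 + 1` trick, which maps 24-hour time onto a 1-12 clock (0 becomes 12, 13 becomes 1, and so on). A standalone sketch of both, with bin2bcd() reimplemented locally since the kernel helper is not available in userspace:

    #include <stdint.h>
    #include <stdio.h>

    /* Local reimplementation of the kernel's bin2bcd(): valid for 0-99. */
    static uint8_t bin2bcd(unsigned int val)
    {
            return (uint8_t)(((val / 10) << 4) | (val % 10));
    }

    /* 24-hour to 1..12 clock, as in line 85 of rtc-rs5c348.c:
     * 0 -> 12, 1 -> 1, ..., 12 -> 12, 13 -> 1, ..., 23 -> 11. */
    static unsigned int to_12h(unsigned int hour24)
    {
            return (hour24 + 11) % 12 + 1;
    }

    int main(void)
    {
            printf("59 -> BCD 0x%02x\n", bin2bcd(59));   /* 0x59 */
            printf("0h -> %u, 13h -> %u, 23h -> %u\n",
                   to_12h(0), to_12h(13), to_12h(23));   /* 12, 1, 11 */
            return 0;
    }
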
/linux-4.4.14/drivers/staging/rdma/ipath/

ipath_sdma.c

    ipath_sdma_make_progress():
         98  struct ipath_sdma_txreq *txp = NULL;   (local)
        105  txp = list_entry(lp, struct ipath_sdma_txreq, list);
        106  start_idx = txp->start_idx;
        121  if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
        134  if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
        136  if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
        141  txp = list_entry(lp, struct ipath_sdma_txreq,
        143  start_idx = txp->start_idx;
        146  txp = NULL;

    ipath_sdma_notify():
        161  struct ipath_sdma_txreq *txp, *txp_next;   (local)
        163  list_for_each_entry_safe(txp, txp_next, list, list) {
        164  list_del_init(&txp->list);
        166  if (txp->callback)
        167  (*txp->callback)(txp->callback_cookie,
        168  txp->callback_status);

    sdma_abort_task():
        260  struct ipath_sdma_txreq *txp, *txpnext;   (local)
        282  list_for_each_entry_safe(txp, txpnext,
        284  txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
        285  if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
        287  list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);

    teardown_sdma():
        510  struct ipath_sdma_txreq *txp, *txpnext;   (local)
        536  list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
        538  txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
        539  if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
        541  list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);

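The ipath hits trace a two-phase teardown: the abort path stamps each request's callback_status and moves it from the active list to a notify list, and ipath_sdma_notify() later pops the notify list and fires the callbacks. A hedged userspace sketch of that split, using a plain singly-linked list in place of the kernel's list_head machinery (all names are illustrative):

    #include <stdio.h>

    enum txreq_status { TXREQ_S_OK, TXREQ_S_ABORTED, TXREQ_S_SHUTDOWN };

    struct txreq {
            struct txreq *next;
            enum txreq_status status;
            void (*callback)(struct txreq *tx);
    };

    /* Abort path: stamp every active request and move the whole chain to
     * the notify list, mirroring the list_move_tail() loop in
     * sdma_abort_task(). */
    static void abort_active(struct txreq **active, struct txreq **notify)
    {
            while (*active) {
                    struct txreq *tx = *active;

                    *active = tx->next;
                    tx->status = TXREQ_S_ABORTED;
                    tx->next = *notify;     /* prepend; order is irrelevant here */
                    *notify = tx;
            }
    }

    /* Notify path: run callbacks only after requests left the active list. */
    static void run_notify(struct txreq **notify)
    {
            while (*notify) {
                    struct txreq *tx = *notify;

                    *notify = tx->next;
                    if (tx->callback)
                            tx->callback(tx);
            }
    }

    static void report(struct txreq *tx)
    {
            printf("completed with status %d\n", tx->status);
    }

    int main(void)
    {
            struct txreq a = { 0, TXREQ_S_OK, report };
            struct txreq b = { &a, TXREQ_S_OK, report };
            struct txreq *active = &b, *notify = NULL;

            abort_active(&active, &notify);
            run_notify(&notify);
            return 0;
    }
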
/linux-4.4.14/drivers/infiniband/hw/qib/

qib_sdma.c

    clear_sdma_activelist():
        127  struct qib_sdma_txreq *txp, *txp_next;   (local)
        129  list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
        130  list_del_init(&txp->list);
        131  if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
        134  idx = txp->start_idx;
        135  while (idx != txp->next_descq_idx) {
        141  if (txp->callback)
        142  (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);

    qib_sdma_make_progress():
        351  struct qib_sdma_txreq *txp = NULL;   (local)
        362  * the next txp on the list.   (comment)
        367  txp = list_entry(lp, struct qib_sdma_txreq, list);
        368  idx = txp->start_idx;
        372  /* if desc is part of this txp, unmap if needed */
        373  if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
        387  /* if now past this txp's descs, do the callback */
        388  if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
        390  list_del_init(&txp->list);
        391  if (txp->callback)
        392  (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
        393  /* see if there is another txp */
        395  txp = NULL;
        398  txp = list_entry(lp, struct qib_sdma_txreq,
        400  idx = txp->start_idx;

    dump_sdma_state():
        723  struct qib_sdma_txreq *txp, *txpnext;   (local)
        766  list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
        769  "SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
        770  txp->start_idx, txp->next_descq_idx);

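qib_sdma_make_progress() shows the standard descriptor-ring reclaim loop: software advances its copy of the ring head toward the hardware head, and whenever the head crosses a request's next_descq_idx, that request is complete and its callback fires. A simplified sketch under assumed types (no real hardware head; completions happen in submission order):

    #include <stdio.h>

    #define DESCQ_CNT 8     /* ring size; power of two keeps wrap cheap */

    struct txreq {
            unsigned int next_descq_idx;  /* first descriptor after this request */
            const char *name;
    };

    /* Advance the software head to 'hwhead', completing every request whose
     * descriptors the hardware has consumed. Returns the new head. */
    static unsigned int make_progress(unsigned int head, unsigned int hwhead,
                                      struct txreq **reqs, int nreqs, int *cur)
    {
            while (head != hwhead) {
                    head = (head + 1) % DESCQ_CNT;
                    /* past this txp's descs? then do the callback */
                    if (*cur < nreqs && reqs[*cur]->next_descq_idx == head) {
                            printf("%s complete\n", reqs[*cur]->name);
                            (*cur)++;
                    }
            }
            return head;
    }

    int main(void)
    {
            struct txreq a = { 3, "req A" }, b = { 5, "req B" };
            struct txreq *reqs[] = { &a, &b };
            int cur = 0;
            unsigned int head = 0;

            head = make_progress(head, 4, reqs, 2, &cur);  /* completes A only */
            head = make_progress(head, 6, reqs, 2, &cur);  /* completes B */
            return 0;
    }
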
/linux-4.4.14/drivers/net/xen-netback/

netback.c

         97  struct xen_netif_tx_request *txp,   (forward declaration)

    xenvif_tx_err(struct xenvif_queue *queue,
                  struct xen_netif_tx_request *txp, RING_IDX end):   (argument, line 699)
        707  make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
        712  RING_COPY_REQUEST(&queue->tx, cons++, txp);

    xenvif_count_requests(struct xenvif_queue *queue,
                          struct xen_netif_tx_request *first,
                          struct xen_netif_tx_request *txp, int work_to_do):   (argument, line 726)
        777  txp = &dropped_tx;
        779  RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
        790  if (!drop_err && txp->size > first->size) {
        794  txp->size, first->size);
        798  first->size -= txp->size;
        801  if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
        802  netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
        803  txp->offset, txp->size);
        808  more_data = txp->flags & XEN_NETTXF_more_data;
        811  txp++;

    xenvif_tx_create_map_op(struct xenvif_queue *queue, u16 pending_idx,
                            struct xen_netif_tx_request *txp,
                            struct gnttab_map_grant_ref *mop):   (argument, line 830)
        838  txp->gref, queue->vif->domid);
        840  memcpy(&queue->pending_tx_info[pending_idx].req, txp,
        841  sizeof(*txp));

    xenvif_get_requests(struct xenvif_queue *queue, struct sk_buff *skb,
                        struct xen_netif_tx_request *txp,
                        struct gnttab_map_grant_ref *gop,
                        unsigned int frag_overflow, struct sk_buff *nskb):   (argument, line 861)
        881  shinfo->nr_frags++, txp++, gop++) {
        884  xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
        894  shinfo->nr_frags++, txp++, gop++) {
        897  xenvif_tx_create_map_op(queue, pending_idx, txp, gop);

    xenvif_fill_frags():
       1069  struct xen_netif_tx_request *txp;   (local)
       1086  txp = &queue->pending_tx_info[pending_idx].req;
       1088  __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
       1089  skb->len += txp->size;
       1090  skb->data_len += txp->size;
       1091  skb->truesize += txp->size;

    xenvif_tx_submit():
       1566  struct xen_netif_tx_request *txp;   (local)
       1571  txp = &queue->pending_tx_info[pending_idx].req;
       1591  if (data_len < txp->size) {
       1593  txp->offset += data_len;
       1594  txp->size -= data_len;
       1601  if (txp->flags & XEN_NETTXF_csum_blank)
       1603  else if (txp->flags & XEN_NETTXF_data_validated)

    make_tx_response(struct xenvif_queue *queue,
                     struct xen_netif_tx_request *txp, s8 st):   (argument, line 1823)
       1831  resp->id = txp->id;
       1834  if (txp->flags & XEN_NETTXF_extra_info)

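The check at line 801 is the key defensive test in this file: a guest-supplied (offset, size) pair must stay inside one grant page, or a malicious frontend could make the backend touch memory past the granted page. A minimal sketch of the same bounds rule (4096 stands in for XEN_PAGE_SIZE; the struct is an illustrative subset, not the real ring type):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u /* stand-in for XEN_PAGE_SIZE */

    struct tx_request {     /* illustrative subset of xen_netif_tx_request */
            uint16_t offset;
            uint16_t size;
    };

    /* Reject any request whose payload would cross the granted page.
     * The sum is computed as unsigned int so it cannot wrap at 16 bits. */
    static bool tx_request_valid(const struct tx_request *txp)
    {
            if ((unsigned int)txp->offset + txp->size > PAGE_SIZE) {
                    fprintf(stderr,
                            "Cross page boundary, offset: %u, size: %u\n",
                            txp->offset, txp->size);
                    return false;
            }
            return true;
    }

    int main(void)
    {
            struct tx_request ok = { .offset = 4000, .size = 96 };
            struct tx_request bad = { .offset = 4000, .size = 97 };

            printf("ok:  %d\n", tx_request_valid(&ok));   /* 1 */
            printf("bad: %d\n", tx_request_valid(&bad));  /* 0 */
            return 0;
    }
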
/linux-4.4.14/drivers/staging/rdma/hfi1/

sdma.c

    sdma_flush():
        385  struct sdma_txreq *txp, *txp_next;   (local)
        393  list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
        394  list_del_init(&txp->list);
        395  list_add_tail(&txp->list, &flushlist);
        399  list_for_each_entry_safe(txp, txp_next, &flushlist, list) {
        402  struct iowait *wait = txp->wait;
        404  list_del_init(&txp->list);
        406  trace_hfi1_sdma_out_sn(sde, txp->sn);
        407  if (WARN_ON_ONCE(sde->head_sn != txp->sn))
        409  sde->head_sn, txp->sn);
        412  sdma_txclean(sde->dd, txp);
        415  if (txp->complete)
        416  (*txp->complete)(txp, SDMA_TXREQ_S_ABORTED, drained);

    sdma_flush_descq():
        573  struct sdma_txreq *txp = get_txhead(sde);   (local)
        578  * the next txp on the list.   (comment)
        585  /* if now past this txp's descs, do the callback */
        586  if (txp && txp->next_descq_idx == head) {
        589  struct iowait *wait = txp->wait;
        596  trace_hfi1_sdma_out_sn(sde, txp->sn);
        597  if (WARN_ON_ONCE(sde->head_sn != txp->sn))
        599  sde->head_sn, txp->sn);
        602  sdma_txclean(sde->dd, txp);
        603  trace_hfi1_sdma_progress(sde, head, tail, txp);
        604  if (txp->complete)
        605  (*txp->complete)(
        606  txp,
        611  /* see if there is another txp */
        612  txp = get_txhead(sde);

    sdma_make_progress():
       1469  struct sdma_txreq *txp = NULL;   (local)
       1479  * the next txp on the list.   (comment)
       1483  txp = get_txhead(sde);
       1485  trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
       1490  /* if now past this txp's descs, do the callback */
       1491  if (txp && txp->next_descq_idx == swhead) {
       1494  struct iowait *wait = txp->wait;
       1501  trace_hfi1_sdma_out_sn(sde, txp->sn);
       1502  if (WARN_ON_ONCE(sde->head_sn != txp->sn))
       1504  sde->head_sn, txp->sn);
       1507  sdma_txclean(sde->dd, txp);
       1508  if (txp->complete)
       1509  (*txp->complete)(
       1510  txp,
       1515  /* see if there is another txp */
       1516  txp = get_txhead(sde);
       1518  trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);

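sdma_flush() first splices the pending requests onto a private flushlist (lines 393-395, done under the engine's lock in the full source), then walks that private list and runs the completion callbacks with the lock dropped, since callbacks may block or resubmit work. A pthread sketch of the same discipline, with minimal assumed types and a head-pointer swap in place of the kernel list helpers:

    #include <pthread.h>
    #include <stdio.h>

    struct txreq {
            struct txreq *next;
            void (*complete)(struct txreq *tx, int status);
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct txreq *flushlist; /* guarded by 'lock' */

    #define TXREQ_S_ABORTED 1

    static void flush_all(void)
    {
            struct txreq *local;

            /* Phase 1: take the whole list while holding the lock. */
            pthread_mutex_lock(&lock);
            local = flushlist;
            flushlist = NULL;
            pthread_mutex_unlock(&lock);

            /* Phase 2: run completions outside the lock; they may block
             * or re-enter the submit path without deadlocking. */
            while (local) {
                    struct txreq *tx = local;

                    local = tx->next;
                    if (tx->complete)
                            tx->complete(tx, TXREQ_S_ABORTED);
            }
    }

    static void done(struct txreq *tx, int status)
    {
            printf("flushed, status %d\n", status);
    }

    int main(void)
    {
            struct txreq a = { NULL, done }, b = { &a, done };

            flushlist = &b;
            flush_all();
            return 0;
    }
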
trace.h

        917  struct sdma_txreq *txp
        919  TP_ARGS(sde, hwhead, swhead, txp),
        936  __entry->txnext = txp ? txp->next_descq_idx : ~0;
        938  __entry->sn = txp ? txp->sn : ~0;
        958  struct sdma_txreq *txp
        960  TP_ARGS(sde, hwhead, swhead, txp),
        976  __entry->txnext = txp ? txp->next_descq_idx : ~0;

/linux-4.4.14/drivers/net/wireless/brcm80211/brcmsmac/

dma.c

        236  struct sk_buff **txp;   (member of struct dma_info)

    dma_attach():
        652  di->txp = kzalloc(size, GFP_ATOMIC);
        653  if (di->txp == NULL)

    dma_detach():
        764  kfree(di->txp);

    dma_txenq():
       1309  di->txp[prevtxd(di, txout)] = p;

    dma_getnexttxp():
       1471  struct sk_buff *txp;   (local)
       1482  txp = NULL;
       1509  for (i = start; i != end && !txp; i = nexttxd(di, i)) {
       1522  txp = di->txp[i];
       1523  di->txp[i] = NULL;
       1533  return txp;

    dma_walk_packets():
       1557  skb = di->txp[i];

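brcmsmac keeps di->txp[] as a shadow array parallel to the DMA descriptor ring: hardware descriptors cannot carry an skb pointer, so the completed slot index is used to look the packet up, and reclaiming a slot NULLs the entry so a later walk cannot free it twice. A compact sketch of that claim/reclaim pairing, with an assumed ring size and invented types:

    #include <stddef.h>
    #include <stdio.h>

    #define NTXD 4  /* descriptor ring size */

    struct packet { const char *name; };

    static struct packet *txp[NTXD];        /* shadow array: slot -> packet */

    /* Post a packet in 'slot'; the hardware only sees the descriptor,
     * so the pointer is parked in the shadow array for later reclaim. */
    static void txenq(unsigned int slot, struct packet *p)
    {
            txp[slot % NTXD] = p;
    }

    /* Reclaim the packet for a completed slot; clearing the entry keeps
     * a second walk from handing the same packet back twice. */
    static struct packet *getnexttxp(unsigned int slot)
    {
            struct packet *p = txp[slot % NTXD];

            txp[slot % NTXD] = NULL;
            return p;
    }

    int main(void)
    {
            struct packet pkt = { "frame 0" };
            struct packet *done;

            txenq(0, &pkt);
            done = getnexttxp(0);
            printf("reclaimed %s\n", done ? done->name : "(none)");
            return 0;
    }
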
/linux-4.4.14/drivers/net/irda/

donauboe.c

    toshoboe_probeinterrupt():
        706  int txp;   (local)
        711  txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
        712  if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)

    toshoboe_interrupt():
       1155  int txp, txpc;   (local)
       1158  txp = self->txpending;
       1166  pr_debug("%s.txd(%x)%x/%x\n", __func__, irqstat, txp, self->txpending);
       1168  txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
       1171  if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
       1173  txpc = txp;
       1177  txp = txpc;
       1182  self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX;

/linux-4.4.14/drivers/net/wireless/brcm80211/brcmfmac/

core.c

    brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success):   (argument, line 563)
        568  eh = (struct ethhdr *)(txp->data);
        580  brcmu_pkt_buf_free_skb(txp);

    brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success):   (argument, line 583)
        592  brcmf_fws_bustxfail(drvr->fws, txp);
        594  if (brcmf_proto_hdrpull(drvr, false, txp, &ifp))
        595  brcmu_pkt_buf_free_skb(txp);
        597  brcmf_txfinalize(ifp, txp, success);

bus.h

        229  void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);

core.h

        217  void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);

/linux-4.4.14/drivers/net/ethernet/micrel/

ks8851.c

    ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq):   (argument, line 708)
        700  * @txp: The sk_buff to transmit.   (kernel-doc)
        703  * Send the @txp to the chip. This means creating the relevant packet header   (kernel-doc)
        716  __func__, txp, txp->len, txp->data, irq);
        727  ks->txh.txw[2] = cpu_to_le16(txp->len);
        734  xfer->tx_buf = txp->data;
        736  xfer->len = ALIGN(txp->len, 4);

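Line 736 rounds the SPI payload length up to a 4-byte boundary with the kernel's ALIGN() macro, since the chip's FIFO is fed in 32-bit units. The macro is a one-liner worth seeing expanded; a userspace rendition of the same bit trick, valid only for power-of-two alignments:

    #include <stdio.h>

    /* Round 'x' up to the next multiple of 'a'; 'a' must be a power of
     * two. Same formula the kernel's ALIGN() uses. */
    #define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

    int main(void)
    {
            unsigned int lens[] = { 60, 61, 63, 64 };

            for (unsigned int i = 0; i < 4; i++)
                    printf("ALIGN_UP(%u, 4) = %u\n", lens[i],
                           (unsigned int)ALIGN_UP(lens[i], 4));
            /* 60 -> 60, 61 -> 64, 63 -> 64, 64 -> 64 */
            return 0;
    }
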
/linux-4.4.14/drivers/net/wireless/mwifiex/

sta_cmdresp.c

    mwifiex_ret_rf_tx_power():
        455  struct host_cmd_ds_rf_tx_pwr *txp = &resp->params.txp;   (local)
        456  u16 action = le16_to_cpu(txp->action);
        458  priv->tx_power_level = le16_to_cpu(txp->cur_level);
        461  priv->max_tx_power_level = txp->max_power;
        462  priv->min_tx_power_level = txp->min_power;

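Firmware structures such as host_cmd_ds_rf_tx_pwr arrive little-endian on the wire, so every multi-byte field goes through le16_to_cpu() before use, while single-byte fields (max_power, min_power) are used directly. A portable sketch of that decode step, with a hand-rolled le16 read standing in for the kernel helper so it behaves the same on any host endianness (the wire layout below is illustrative, not the real mwifiex struct):

    #include <stdint.h>
    #include <stdio.h>

    /* Portable little-endian 16-bit read, standing in for le16_to_cpu()
     * applied to a __le16 field. */
    static uint16_t get_le16(const uint8_t *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    struct rf_tx_pwr {          /* illustrative wire layout, little-endian */
            uint8_t action[2];      /* __le16 */
            uint8_t cur_level[2];   /* __le16 */
            uint8_t max_power;      /* single byte: no swap needed */
            uint8_t min_power;
    };

    int main(void)
    {
            /* action = 0x0001, cur_level = 13, max 20, min 8 */
            struct rf_tx_pwr resp = { {1, 0}, {13, 0}, 20, 8 };

            printf("action %u, level %u (max %u, min %u)\n",
                   get_le16(resp.action), get_le16(resp.cur_level),
                   resp.max_power, resp.min_power);
            return 0;
    }
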
sta_cmd.c

    mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd, u16 cmd_action,
                             struct host_cmd_ds_txpwr_cfg *txp):   (argument, line 246)
        258  if (txp->mode) {
        260  *) ((unsigned long) txp +
        262  memmove(cmd_txp_cfg, txp,
        274  memmove(cmd_txp_cfg, txp, sizeof(*txp));

    mwifiex_cmd_rf_tx_power():
        293  struct host_cmd_ds_rf_tx_pwr *txp = &cmd->params.txp;   (local)
        298  txp->action = cpu_to_le16(cmd_action);

fw.h

       2113  struct host_cmd_ds_rf_tx_pwr txp;   (member of the params union in struct host_cmd_ds_command)

/linux-4.4.14/drivers/net/wireless/iwlwifi/mvm/

rs.c

    rs_get_tpc_action():
       2034  /* Too many failures, increase txp */
       2037  IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
       2047  "no weak txp measurement. decrease txp\n");
       2053  "lower txp has better tpt. decrease txp\n");
       2065  "higher txp has better tpt. increase txp\n");
       2073  "lower txp has worse tpt. increase txp\n");
       2078  IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");

    rs_tpc_perform():
       2119  "tpc is not allowed. remove txp restrictions\n");
       2145  IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
       2150  IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");

/linux-4.4.14/arch/mips/include/asm/octeon/

cvmx-lmcx-defs.h

       3142  uint64_t txp:3;   (member of struct cvmx_lmcx_timing_params1_s)
       3156  uint64_t txp:3;
       3174  uint64_t txp:3;   (member of struct cvmx_lmcx_timing_params1_cn63xxp1)
       3188  uint64_t txp:3;

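The same txp:3 field appears twice per struct because C leaves bitfield allocation order implementation-defined: register headers like cvmx-lmcx-defs.h declare the fields once in big-endian order and once reversed for little-endian, selected by an #ifdef. A sketch of the idiom with invented field names (__BYTE_ORDER__ and __ORDER_BIG_ENDIAN__ are the GCC/Clang predefined macros):

    #include <stdint.h>
    #include <stdio.h>

    /* One 64-bit hardware register, declared so each named field maps
     * onto the same physical bits regardless of host endianness. */
    union timing_reg {
            uint64_t u64;
            struct {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                    uint64_t reserved : 61; /* big-endian packs from the MSB */
                    uint64_t txp      : 3;  /* bits 2:0 of the register */
    #else
                    uint64_t txp      : 3;  /* little-endian packs from the LSB */
                    uint64_t reserved : 61;
    #endif
            } s;
    };

    int main(void)
    {
            union timing_reg reg = { .u64 = 0 };

            reg.s.txp = 5;  /* 3-bit field: values 0..7 */
            printf("raw register: 0x%016llx\n", (unsigned long long)reg.u64);
            return 0;
    }
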
/linux-4.4.14/drivers/staging/lustre/lnet/lnet/

lib-msg.c

    lnet_finalize():
        448  CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",

/linux-4.4.14/drivers/net/wireless/ath/ath5k/

phy.c

    ath5k_setup_channel_powertable():
       3431  /* Set txp.offset so that we can   (comment)
       3451  /* Set txp.offset, note that table_min   (comment)

/linux-4.4.14/drivers/net/ethernet/broadcom/

bnx2.c

    bnx2_request_uncached_firmware():
       3740  check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {

    bnx2_init_cpus():
       3931  rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);

bnx2.h

       7058  struct bnx2_mips_fw_file_entry txp;   (member of struct bnx2_mips_fw_file)