Cross-reference: rx_ctx (all call sites, grouped by file)

drivers/mailbox/bcm-pdc-mailbox.c
    389  struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];
    591  struct pdc_rx_ctx *rx_ctx;
    604  (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
    620  rx_ctx = &pdcs->rx_ctx[rx_idx];
    621  num_frags = rx_ctx->rxin_numd;
    623  mssg.ctx = rx_ctx->rxp_ctx;
    624  rx_ctx->rxp_ctx = NULL;
    625  resp_hdr = rx_ctx->resp_hdr;
    626  resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
    627  dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
    822  struct pdc_rx_ctx *rx_ctx;
    846  pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;
    851  rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
    852  rx_ctx->rxp_ctx = ctx;
    853  rx_ctx->dst_sg = dst_sg;
    854  rx_ctx->resp_hdr = vaddr;
    855  rx_ctx->resp_hdr_daddr = daddr;
    923  pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;

drivers/net/ethernet/intel/i40e/i40e_main.c
   3250  struct i40e_hmc_obj_rxq rx_ctx;
   3258  memset(&rx_ctx, 0, sizeof(rx_ctx));
   3293  rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
   3296  rx_ctx.base = (ring->dma / 128);
   3297  rx_ctx.qlen = ring->count;
   3300  rx_ctx.dsize = 1;
   3305  rx_ctx.hsplit_0 = 0;
   3307  rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
   3309  rx_ctx.lrxqthresh = 0;
   3311  rx_ctx.lrxqthresh = 1;
   3312  rx_ctx.crcstrip = 1;
   3313  rx_ctx.l2tsel = 1;
   3315  rx_ctx.showiv = 0;
   3317  rx_ctx.prefena = 1;
   3329  err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);

drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
    626  struct i40e_hmc_obj_rxq rx_ctx;
    633  memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
    636  rx_ctx.base = info->dma_ring_addr / 128;
    637  rx_ctx.qlen = info->ring_len;
    640  rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
    649  rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
    652  rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
    660  rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
    667  rx_ctx.rxmax = info->max_pkt_size;
    670  rx_ctx.dsize = 1;
    673  rx_ctx.lrxqthresh = 1;
    674  rx_ctx.crcstrip = 1;
    675  rx_ctx.prefena = 1;
    676  rx_ctx.l2tsel = 1;
    689  ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
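Both i40e groups above follow the same shape: zero a shadow struct i40e_hmc_obj_rxq on the stack, fill in the queue geometry and feature bits, then commit it to hardware with i40e_set_lan_rx_queue_context(). A condensed sketch of that pattern, built only from fields and calls visible in the lines above; the function name, parameter list, and the subset of fields shown here are illustrative, not the driver's exact code:

    /* Minimal sketch of the i40e RX-queue context setup pattern.
     * Assumes the i40e driver headers for struct i40e_hw and
     * struct i40e_hmc_obj_rxq. base is in 128-byte units and
     * dsize=1 selects 32-byte RX descriptors. */
    static int example_config_rx_queue(struct i40e_hw *hw, u16 pf_q,
                                       dma_addr_t ring_dma, u16 ring_count)
    {
        struct i40e_hmc_obj_rxq rx_ctx;

        memset(&rx_ctx, 0, sizeof(rx_ctx)); /* start from a zeroed context */
        rx_ctx.base = ring_dma / 128;       /* descriptor ring base address */
        rx_ctx.qlen = ring_count;           /* ring length in descriptors */
        rx_ctx.dsize = 1;                   /* 32-byte RX descriptors */
        rx_ctx.crcstrip = 1;                /* strip Ethernet FCS on receive */
        rx_ctx.l2tsel = 1;                  /* VLAN tag location select */
        rx_ctx.prefena = 1;                 /* enable descriptor prefetch */

        /* write the shadow context into the HMC backing store */
        return i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
    }

The zero-then-fill sequence matters: any field the caller does not set must read as zero in the HMC object, so both call sites memset() the shadow struct before writing it out.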
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
    136  struct mlx5e_tls_offload_context_rx *rx_ctx =
    139  rx_ctx->handle = htonl(swid);
    169  struct mlx5e_tls_offload_context_rx *rx_ctx;
    174  rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);
    178  mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);

drivers/net/vmxnet3/vmxnet3_drv.c
   1287  struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
   1743  rq->rx_ctx.skb = NULL;

drivers/net/vmxnet3/vmxnet3_int.h
    289  struct vmxnet3_rx_ctx rx_ctx;

include/net/tls.h
    623  struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
    625  atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);

net/mac80211/tkip.c
    250  struct tkip_ctx_rx *rx_ctx = &key->u.tkip.rx[queue];
    275  if (iv32 < rx_ctx->iv32 ||
    276  (iv32 == rx_ctx->iv32 &&
    277  (iv16 < rx_ctx->iv16 ||
    278  (iv16 == rx_ctx->iv16 &&
    279  (rx_ctx->iv32 || rx_ctx->iv16 ||
    280  rx_ctx->ctx.state != TKIP_STATE_NOT_INIT)))))
    285  rx_ctx->ctx.state = TKIP_STATE_PHASE1_HW_UPLOADED;
    289  if (rx_ctx->ctx.state == TKIP_STATE_NOT_INIT ||
    290  rx_ctx->iv32 != iv32) {
    292  tkip_mixing_phase1(tk, &rx_ctx->ctx, ta, iv32);
    296  rx_ctx->ctx.state != TKIP_STATE_PHASE1_HW_UPLOADED) {
    303  iv32, rx_ctx->ctx.p1k);
    304  rx_ctx->ctx.state = TKIP_STATE_PHASE1_HW_UPLOADED;
    307  tkip_mixing_phase2(tk, &rx_ctx->ctx, iv16, rc4key);

net/tls/tls_device.c
    680  struct tls_offload_context_rx *rx_ctx;
    691  rx_ctx = tls_offload_ctx_rx(tls_ctx);
    694  switch (rx_ctx->resync_type) {
    696  resync_req = atomic64_read(&rx_ctx->resync_req);
    702  !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
    706  if (likely(!rx_ctx->resync_nh_do_now))
    715  rx_ctx->resync_nh_do_now = 0;

net/tls/tls_sw.c
   2248  struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
   2251  rx_ctx->saved_data_ready = sk->sk_data_ready;
   2255  strp_check_rcv(&rx_ctx->strp);
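The include/net/tls.h and net/tls/tls_device.c lines above form a small lock-free handshake: the setter packs the TCP sequence number into the high 32 bits of resync_req and raises bit 0 as the "request pending" flag in a single atomic64_set(), and the reader claims the request with atomic64_read() followed by atomic64_try_cmpxchg() back to zero. A standalone C11 model of that word format (names here are illustrative; the kernel uses atomic64_t, not <stdatomic.h>):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t resync_req;   /* models rx_ctx->resync_req */

    /* Setter side (tls.h line 625): record seq and raise the pending
     * flag in one atomic store, so the reader sees both or neither. */
    static void post_resync_request(uint32_t seq)
    {
        atomic_store(&resync_req, ((uint64_t)seq << 32) | 1);
    }

    /* Reader side (tls_device.c lines 696/702): claim a pending
     * request, racing against a newer post_resync_request(). */
    static bool take_resync_request(uint32_t *seq)
    {
        uint64_t req = atomic_load(&resync_req);

        if (!(req & 1))
            return false;               /* nothing pending */
        if (!atomic_compare_exchange_strong(&resync_req, &req, 0))
            return false;               /* lost a race; retry later */
        *seq = (uint32_t)(req >> 32);
        return true;
    }

    int main(void)
    {
        uint32_t seq;

        post_resync_request(0x12345678);
        if (take_resync_request(&seq))
            printf("resync requested at seq 0x%08x\n", seq);
        return 0;
    }

Packing the sequence number and the flag into one 64-bit word is the design point: a plain flag plus a separate seq field would need a lock to keep the two consistent, while a single atomic word is safe to update from the driver's interrupt path.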
net/wireless/lib80211_crypt_wep.c
     36  struct arc4_ctx rx_ctx;
    177  arc4_setkey(&wep->rx_ctx, key, klen);
    178  arc4_crypt(&wep->rx_ctx, pos, pos, plen + 4);
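The lib80211 WEP lines above show the classic per-frame RC4 usage: every packet loads a fresh key schedule with arc4_setkey() (WEP's per-frame key is the 3-byte IV from the frame prepended to the configured key), and arc4_crypt() then deciphers the payload plus the trailing 4-byte CRC-32 ICV in place, which is why the length is plen + 4. A minimal sketch of that sequence, assuming <crypto/arc4.h>; the helper name and parameters here are illustrative:

    /* Per-frame WEP decrypt: seed RC4 from the IV||key material the
     * caller has assembled, then decrypt payload and ICV in place. */
    static void wep_decrypt_frame(struct arc4_ctx *rx_ctx,
                                  const u8 *key, unsigned int klen,
                                  u8 *pos, unsigned int plen)
    {
        arc4_setkey(rx_ctx, key, klen);          /* fresh schedule per frame */
        arc4_crypt(rx_ctx, pos, pos, plen + 4);  /* in place: out == in */
    }

The per-frame rekey is mandatory because arc4_crypt() advances the cipher state destructively; reusing a schedule across frames would desynchronize the keystream. (WEP itself is cryptographically broken and this code survives only for legacy interoperability.)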