offload 127 arch/um/drivers/vector_user.c int err = -ENOMEM, offload;
offload 144 arch/um/drivers/vector_user.c offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
offload 145 arch/um/drivers/vector_user.c ioctl(fd, TUNSETOFFLOAD, offload);
offload 458 drivers/infiniband/hw/mlx5/srq_cmd.c MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
offload 269 drivers/net/can/flexcan.c struct can_rx_offload offload;
offload 730 drivers/net/can/flexcan.c err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
offload 777 drivers/net/can/flexcan.c err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
offload 782 drivers/net/can/flexcan.c static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
offload 784 drivers/net/can/flexcan.c return container_of(offload, struct flexcan_priv, offload);
offload 787 drivers/net/can/flexcan.c static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
offload 791 drivers/net/can/flexcan.c struct flexcan_priv *priv = rx_offload_to_priv(offload);
offload 814 drivers/net/can/flexcan.c offload->dev->stats.rx_over_errors++;
offload 815 drivers/net/can/flexcan.c offload->dev->stats.rx_errors++;
offload 893 drivers/net/can/flexcan.c ret = can_rx_offload_irq_offload_timestamp(&priv->offload,
offload 904 drivers/net/can/flexcan.c can_rx_offload_irq_offload_fifo(&priv->offload);
offload 924 drivers/net/can/flexcan.c stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
offload 1146 drivers/net/can/flexcan.c for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
offload 1291 drivers/net/can/flexcan.c priv->offload.mailbox_read = flexcan_mailbox_read;
offload 1296 drivers/net/can/flexcan.c priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
offload 1297 drivers/net/can/flexcan.c priv->offload.mb_last = priv->mb_count - 2;
offload 1299 drivers/net/can/flexcan.c imask = GENMASK_ULL(priv->offload.mb_last,
offload 1300 drivers/net/can/flexcan.c priv->offload.mb_first);
offload 1304 drivers/net/can/flexcan.c err = can_rx_offload_add_timestamp(dev, &priv->offload);
offload 1308 drivers/net/can/flexcan.c err = can_rx_offload_add_fifo(dev, &priv->offload,
offload 1321 drivers/net/can/flexcan.c can_rx_offload_enable(&priv->offload);
offload 1327 drivers/net/can/flexcan.c can_rx_offload_del(&priv->offload);
offload 1343 drivers/net/can/flexcan.c can_rx_offload_disable(&priv->offload);
offload 1346 drivers/net/can/flexcan.c can_rx_offload_del(&priv->offload);
offload 21 drivers/net/can/rx-offload.c static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
offload 23 drivers/net/can/rx-offload.c if (offload->inc)
offload 29 drivers/net/can/rx-offload.c static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
offload 31 drivers/net/can/rx-offload.c if (offload->inc)
offload 39 drivers/net/can/rx-offload.c struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
offload 40 drivers/net/can/rx-offload.c struct net_device *dev = offload->dev;
offload 46 drivers/net/can/rx-offload.c (skb = skb_dequeue(&offload->skb_queue))) {
offload 59 drivers/net/can/rx-offload.c if (!skb_queue_empty(&offload->skb_queue))
offload 60 drivers/net/can/rx-offload.c napi_reschedule(&offload->napi);
offload 63 drivers/net/can/rx-offload.c can_led_event(offload->dev, CAN_LED_EVENT_RX);
offload 132 drivers/net/can/rx-offload.c can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
offload 139 drivers/net/can/rx-offload.c if (likely(skb_queue_len(&offload->skb_queue) <
offload 140 drivers/net/can/rx-offload.c offload->skb_queue_len_max)) {
offload 141 drivers/net/can/rx-offload.c skb = alloc_can_skb(offload->dev, &cf);
offload 155 drivers/net/can/rx-offload.c ret = offload->mailbox_read(offload, &cf_overflow,
offload 167 drivers/net/can/rx-offload.c offload->dev->stats.rx_dropped++;
offload 168 drivers/net/can/rx-offload.c offload->dev->stats.rx_fifo_errors++;
offload 180 drivers/net/can/rx-offload.c ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
offload 192 drivers/net/can/rx-offload.c offload->dev->stats.rx_dropped++;
offload 193 drivers/net/can/rx-offload.c offload->dev->stats.rx_fifo_errors++;
offload 202 drivers/net/can/rx-offload.c int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
offload 209 drivers/net/can/rx-offload.c for (i = offload->mb_first;
offload 210 drivers/net/can/rx-offload.c can_rx_offload_le(offload, i, offload->mb_last);
offload 211 drivers/net/can/rx-offload.c can_rx_offload_inc(offload, &i)) {
offload 217 drivers/net/can/rx-offload.c skb = can_rx_offload_offload_one(offload, i);
offload 228 drivers/net/can/rx-offload.c spin_lock_irqsave(&offload->skb_queue.lock, flags);
offload 229 drivers/net/can/rx-offload.c skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
offload 230 drivers/net/can/rx-offload.c spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
offload 232 drivers/net/can/rx-offload.c if ((queue_len = skb_queue_len(&offload->skb_queue)) >
offload 233 drivers/net/can/rx-offload.c (offload->skb_queue_len_max / 8))
offload 234 drivers/net/can/rx-offload.c netdev_dbg(offload->dev, "%s: queue_len=%d\n",
offload 237 drivers/net/can/rx-offload.c can_rx_offload_schedule(offload);
offload 244 drivers/net/can/rx-offload.c int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
offload 250 drivers/net/can/rx-offload.c skb = can_rx_offload_offload_one(offload, 0);
offload 256 drivers/net/can/rx-offload.c skb_queue_tail(&offload->skb_queue, skb);
offload 261 drivers/net/can/rx-offload.c can_rx_offload_schedule(offload);
offload 267 drivers/net/can/rx-offload.c int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
offload 273 drivers/net/can/rx-offload.c if (skb_queue_len(&offload->skb_queue) >
offload 274 drivers/net/can/rx-offload.c offload->skb_queue_len_max) {
offload 282 drivers/net/can/rx-offload.c spin_lock_irqsave(&offload->skb_queue.lock, flags);
offload 283 drivers/net/can/rx-offload.c __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
offload 284 drivers/net/can/rx-offload.c spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
offload 286 drivers/net/can/rx-offload.c can_rx_offload_schedule(offload);
offload 292 drivers/net/can/rx-offload.c unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
offload 295 drivers/net/can/rx-offload.c struct net_device *dev = offload->dev;
offload 305 drivers/net/can/rx-offload.c err = can_rx_offload_queue_sorted(offload, skb, timestamp);
offload 315 drivers/net/can/rx-offload.c int can_rx_offload_queue_tail(struct can_rx_offload *offload,
offload 318 drivers/net/can/rx-offload.c if (skb_queue_len(&offload->skb_queue) >
offload 319 drivers/net/can/rx-offload.c offload->skb_queue_len_max) {
offload 324 drivers/net/can/rx-offload.c skb_queue_tail(&offload->skb_queue, skb);
offload 325 drivers/net/can/rx-offload.c can_rx_offload_schedule(offload);
offload 331 drivers/net/can/rx-offload.c static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
offload 333 drivers/net/can/rx-offload.c offload->dev = dev;
offload 336 drivers/net/can/rx-offload.c offload->skb_queue_len_max = 2 << fls(weight);
offload 337 drivers/net/can/rx-offload.c offload->skb_queue_len_max *= 4;
offload 338 drivers/net/can/rx-offload.c skb_queue_head_init(&offload->skb_queue);
offload 340 drivers/net/can/rx-offload.c can_rx_offload_reset(offload);
offload 341 drivers/net/can/rx-offload.c netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
offload 344 drivers/net/can/rx-offload.c __func__, offload->skb_queue_len_max);
offload 349 drivers/net/can/rx-offload.c int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
offload 353 drivers/net/can/rx-offload.c if (offload->mb_first > BITS_PER_LONG_LONG ||
offload 354 drivers/net/can/rx-offload.c offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
offload 357 drivers/net/can/rx-offload.c if (offload->mb_first < offload->mb_last) {
offload 358 drivers/net/can/rx-offload.c offload->inc = true;
offload 359 drivers/net/can/rx-offload.c weight = offload->mb_last - offload->mb_first;
offload 361 drivers/net/can/rx-offload.c offload->inc = false;
offload 362 drivers/net/can/rx-offload.c weight = offload->mb_first - offload->mb_last;
offload 365 drivers/net/can/rx-offload.c return can_rx_offload_init_queue(dev, offload, weight);
offload 369 drivers/net/can/rx-offload.c int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
offload 371 drivers/net/can/rx-offload.c if (!offload->mailbox_read)
offload 374 drivers/net/can/rx-offload.c return can_rx_offload_init_queue(dev, offload, weight);
offload 378 drivers/net/can/rx-offload.c void can_rx_offload_enable(struct can_rx_offload *offload)
offload 380 drivers/net/can/rx-offload.c can_rx_offload_reset(offload);
offload 381 drivers/net/can/rx-offload.c napi_enable(&offload->napi);
offload 385 drivers/net/can/rx-offload.c void can_rx_offload_del(struct can_rx_offload *offload)
offload 387 drivers/net/can/rx-offload.c netif_napi_del(&offload->napi);
offload 388 drivers/net/can/rx-offload.c skb_queue_purge(&offload->skb_queue);
offload 392 drivers/net/can/rx-offload.c void can_rx_offload_reset(struct can_rx_offload *offload)
offload 189 drivers/net/can/ti_hecc.c struct can_rx_offload offload;
offload 533 drivers/net/can/ti_hecc.c struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
offload 535 drivers/net/can/ti_hecc.c return container_of(offload, struct ti_hecc_priv, offload);
offload 538 drivers/net/can/ti_hecc.c static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
offload 542 drivers/net/can/ti_hecc.c struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
offload 622 drivers/net/can/ti_hecc.c err = can_rx_offload_queue_sorted(&priv->offload, skb,
offload 657 drivers/net/can/ti_hecc.c err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
offload 745 drivers/net/can/ti_hecc.c can_rx_offload_get_echo_skb(&priv->offload,
offload 761 drivers/net/can/ti_hecc.c can_rx_offload_irq_offload_timestamp(&priv->offload,
offload 804 drivers/net/can/ti_hecc.c can_rx_offload_enable(&priv->offload);
offload 815 drivers/net/can/ti_hecc.c can_rx_offload_disable(&priv->offload);
offload 942 drivers/net/can/ti_hecc.c priv->offload.mailbox_read = ti_hecc_mailbox_read;
offload 943 drivers/net/can/ti_hecc.c priv->offload.mb_first = HECC_RX_FIRST_MBOX;
offload 944 drivers/net/can/ti_hecc.c priv->offload.mb_last = HECC_RX_LAST_MBOX;
offload 945 drivers/net/can/ti_hecc.c err = can_rx_offload_add_timestamp(ndev, &priv->offload);
offload 965 drivers/net/can/ti_hecc.c can_rx_offload_del(&priv->offload);
offload 982 drivers/net/can/ti_hecc.c can_rx_offload_del(&priv->offload);
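Taken together, the flexcan.c, ti_hecc.c, and rx-offload.c entries above outline the rx-offload helper's contract: a driver embeds a struct can_rx_offload, supplies a mailbox_read() callback, bounds the mailbox range with mb_first/mb_last, and feeds interrupts into can_rx_offload_irq_offload_timestamp(). A minimal sketch of that wiring, assuming a hypothetical demo_ driver and mailbox layout; only the can_rx_offload_* calls and field names are taken from the entries above:

/*
 * Sketch, not from the tree. Names prefixed demo_ and the DEMO_ mailbox
 * layout are hypothetical; the helper API mirrors flexcan/ti_hecc usage.
 */
#include <linux/interrupt.h>
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

#define DEMO_RX_FIRST_MBOX	0	/* assumption: hw mailbox layout */
#define DEMO_RX_LAST_MBOX	31

struct demo_priv {
	struct can_priv can;		/* CAN core expects this first */
	struct can_rx_offload offload;	/* embedded, as in flexcan/ti_hecc */
};

static struct demo_priv *rx_offload_to_priv(struct can_rx_offload *offload)
{
	return container_of(offload, struct demo_priv, offload);
}

/* Called by the helper for each pending mailbox; fills in one frame. */
static unsigned int demo_mailbox_read(struct can_rx_offload *offload,
				      struct can_frame *cf, u32 *timestamp,
				      unsigned int mb)
{
	struct demo_priv *priv = rx_offload_to_priv(offload);

	/* ... read mailbox 'mb' from hardware into *cf, set *timestamp ... */
	return 1;	/* frame consumed */
}

static int demo_setup(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);
	int err;

	priv->offload.mailbox_read = demo_mailbox_read;
	priv->offload.mb_first = DEMO_RX_FIRST_MBOX;
	priv->offload.mb_last = DEMO_RX_LAST_MBOX;

	/* Registers the NAPI instance; weight derives from the mb range. */
	err = can_rx_offload_add_timestamp(dev, &priv->offload);
	if (err)
		return err;

	can_rx_offload_enable(&priv->offload);
	return 0;
}

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct demo_priv *priv = netdev_priv(dev);
	u64 pending = 0;	/* ... bitmask of filled mailboxes from hw ... */

	can_rx_offload_irq_offload_timestamp(&priv->offload, pending);
	return IRQ_HANDLED;
}

static void demo_teardown(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	can_rx_offload_disable(&priv->offload);
	can_rx_offload_del(&priv->offload);
}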
offload 134 drivers/net/dsa/sja1105/sja1105_tas.c if (tas_data->offload[port]) {
offload 135 drivers/net/dsa/sja1105/sja1105_tas.c num_entries += tas_data->offload[port]->num_entries;
offload 191 drivers/net/dsa/sja1105/sja1105_tas.c const struct tc_taprio_qopt_offload *offload;
offload 193 drivers/net/dsa/sja1105/sja1105_tas.c offload = tas_data->offload[port];
offload 194 drivers/net/dsa/sja1105/sja1105_tas.c if (!offload)
offload 198 drivers/net/dsa/sja1105/sja1105_tas.c schedule_end_idx = k + offload->num_entries - 1;
offload 219 drivers/net/dsa/sja1105/sja1105_tas.c for (i = 0; i < offload->num_entries; i++, k++) {
offload 220 drivers/net/dsa/sja1105/sja1105_tas.c s64 delta_ns = offload->entries[i].interval;
offload 226 drivers/net/dsa/sja1105/sja1105_tas.c ~offload->entries[i].gate_mask;
offload 260 drivers/net/dsa/sja1105/sja1105_tas.c const struct tc_taprio_qopt_offload *offload;
offload 269 drivers/net/dsa/sja1105/sja1105_tas.c offload = tas_data->offload[port];
offload 270 drivers/net/dsa/sja1105/sja1105_tas.c if (!offload)
offload 276 drivers/net/dsa/sja1105/sja1105_tas.c max_cycle_time = max(offload->cycle_time, admin->cycle_time);
offload 277 drivers/net/dsa/sja1105/sja1105_tas.c min_cycle_time = min(offload->cycle_time, admin->cycle_time);
offload 286 drivers/net/dsa/sja1105/sja1105_tas.c div_s64_rem(offload->base_time, offload->cycle_time, &rem);
offload 298 drivers/net/dsa/sja1105/sja1105_tas.c i < offload->num_entries;
offload 299 drivers/net/dsa/sja1105/sja1105_tas.c delta1 += offload->entries[i].interval, i++) {
offload 312 drivers/net/dsa/sja1105/sja1105_tas.c t1 += offload->cycle_time) {
offload 344 drivers/net/dsa/sja1105/sja1105_tas.c if (!!tas_data->offload[port] == admin->enable)
offload 348 drivers/net/dsa/sja1105/sja1105_tas.c taprio_offload_free(tas_data->offload[port]);
offload 349 drivers/net/dsa/sja1105/sja1105_tas.c tas_data->offload[port] = NULL;
offload 397 drivers/net/dsa/sja1105/sja1105_tas.c tas_data->offload[port] = taprio_offload_get(admin);
offload 413 drivers/net/dsa/sja1105/sja1105_tas.c struct tc_taprio_qopt_offload *offload;
offload 417 drivers/net/dsa/sja1105/sja1105_tas.c offload = priv->tas_data.offload[port];
offload 418 drivers/net/dsa/sja1105/sja1105_tas.c if (!offload)
offload 421 drivers/net/dsa/sja1105/sja1105_tas.c taprio_offload_free(offload);
offload 12 drivers/net/dsa/sja1105/sja1105_tas.h struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS];
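The sja1105_tas.c entries show the consumer side of the taprio offload contract: the switch driver takes a reference on the qdisc-provided schedule with taprio_offload_get(), walks entries[].interval and gate_mask, and releases it with taprio_offload_free(). A sketch under those assumptions, with a hypothetical demo_ switch driver standing in for sja1105:

/*
 * Sketch only. demo_offload[] and demo_port_setup_taprio() are made up;
 * the tc_taprio_qopt_offload fields and the get/free calls match the
 * sja1105_tas.c entries above.
 */
#include <net/pkt_sched.h>

#define DEMO_NUM_PORTS	4

static struct tc_taprio_qopt_offload *demo_offload[DEMO_NUM_PORTS];

static int demo_port_setup_taprio(int port,
				  struct tc_taprio_qopt_offload *admin)
{
	size_t i;

	if (!admin->enable) {		/* disable: drop our reference */
		if (demo_offload[port]) {
			taprio_offload_free(demo_offload[port]);
			demo_offload[port] = NULL;
		}
		return 0;
	}

	/* Hold the schedule past this call, as sja1105 does. */
	demo_offload[port] = taprio_offload_get(admin);

	for (i = 0; i < admin->num_entries; i++) {
		u32 interval_ns = admin->entries[i].interval;
		u32 gate_mask = admin->entries[i].gate_mask;

		/* ... program gate-control-list entry i: open the queues
		 * in gate_mask for interval_ns nanoseconds ...
		 */
	}

	/* ... arm the cycle from admin->base_time / admin->cycle_time ... */
	return 0;
}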
offload 291 drivers/net/ethernet/3com/typhoon.c __le32 offload;
offload 1242 drivers/net/ethernet/3com/typhoon.c tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
offload 1243 drivers/net/ethernet/3com/typhoon.c tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
offload 1244 drivers/net/ethernet/3com/typhoon.c tp->offload |= TYPHOON_OFFLOAD_VLAN;
offload 1920 drivers/net/ethernet/3com/typhoon.c xp_cmd.parm2 = tp->offload;
offload 1921 drivers/net/ethernet/3com/typhoon.c xp_cmd.parm3 = tp->offload;
offload 912 drivers/net/ethernet/amazon/ena/ena_admin_defs.h struct ena_admin_feature_offload_desc offload;
offload 1959 drivers/net/ethernet/amazon/ena/ena_com.c memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
offload 1960 drivers/net/ethernet/amazon/ena/ena_com.c sizeof(get_resp.u.offload));
offload 2207 drivers/net/ethernet/amazon/ena/ena_com.c struct ena_admin_feature_offload_desc *offload)
offload 2219 drivers/net/ethernet/amazon/ena/ena_com.c memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
offload 359 drivers/net/ethernet/amazon/ena/ena_com.h struct ena_admin_feature_offload_desc offload;
offload 638 drivers/net/ethernet/amazon/ena/ena_com.h struct ena_admin_feature_offload_desc *offload);
offload 3238 drivers/net/ethernet/amazon/ena/ena_netdev.c if (feat->offload.tx &
offload 3242 drivers/net/ethernet/amazon/ena/ena_netdev.c if (feat->offload.tx &
offload 3246 drivers/net/ethernet/amazon/ena/ena_netdev.c if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
offload 3249 drivers/net/ethernet/amazon/ena/ena_netdev.c if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
offload 3252 drivers/net/ethernet/amazon/ena/ena_netdev.c if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
offload 3255 drivers/net/ethernet/amazon/ena/ena_netdev.c if (feat->offload.rx_supported &
offload 3259 drivers/net/ethernet/amazon/ena/ena_netdev.c if (feat->offload.rx_supported &
offload 399 drivers/net/ethernet/chelsio/cxgb3/common.h unsigned int offload;
offload 629 drivers/net/ethernet/chelsio/cxgb3/common.h return adap->params.offload;
offload 3710 drivers/net/ethernet/chelsio/cxgb3/t3_hw.c adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
offload 393 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h unsigned char offload;
offload 1279 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h return adap->params.offload;
offload 1294 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h return (adap->params.offload || adap->params.crypto);
offload 4688 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->params.offload = 1;
offload 5090 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->params.offload = 0;
offload 5293 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->params.offload = 0;
offload 5958 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adapter->params.offload = 0;
offload 5969 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adapter->params.offload = 0;
offload 5979 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adapter->params.offload = 0;
offload 5996 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adapter->params.offload = 0;
offload 422 drivers/net/ethernet/huawei/hinic/hinic_tx.c enum hinic_offload_type offload = 0;
offload 428 drivers/net/ethernet/huawei/hinic/hinic_tx.c offload |= TX_OFFLOAD_TSO;
offload 432 drivers/net/ethernet/huawei/hinic/hinic_tx.c offload |= TX_OFFLOAD_CSUM;
offload 441 drivers/net/ethernet/huawei/hinic/hinic_tx.c offload |= TX_OFFLOAD_VLAN;
offload 444 drivers/net/ethernet/huawei/hinic/hinic_tx.c if (offload)
offload 2109 drivers/net/ethernet/intel/ice/ice_txrx.c struct ice_tx_offload_params offload = { 0 };
offload 2135 drivers/net/ethernet/intel/ice/ice_txrx.c offload.tx_ring = tx_ring;
offload 2149 drivers/net/ethernet/intel/ice/ice_txrx.c tso = ice_tso(first, &offload);
offload 2154 drivers/net/ethernet/intel/ice/ice_txrx.c csum = ice_tx_csum(first, &offload);
offload 2162 drivers/net/ethernet/intel/ice/ice_txrx.c offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
offload 2166 drivers/net/ethernet/intel/ice/ice_txrx.c if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
offload 2176 drivers/net/ethernet/intel/ice/ice_txrx.c cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
offload 2177 drivers/net/ethernet/intel/ice/ice_txrx.c cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
offload 2179 drivers/net/ethernet/intel/ice/ice_txrx.c cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
offload 2182 drivers/net/ethernet/intel/ice/ice_txrx.c ice_tx_map(tx_ring, first, &offload);
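The ena_netdev.c entries follow a common probe-time pattern: test the capability bits the device reported in its feature-offload descriptor and accumulate the matching NETIF_F_* flags. A sketch of that mapping, assuming the ena admin headers are in scope; the demo_set_dev_offloads() name and the exact NETIF_F_* choices are illustrative, not the driver's verbatim code:

/*
 * Sketch. Assumes ena_admin_defs.h for the descriptor type and masks
 * (both appear in the entries above); the flag mapping is an assumption.
 */
#include <linux/netdevice.h>

static void
demo_set_dev_offloads(const struct ena_admin_feature_offload_desc *offload,
		      struct net_device *netdev)
{
	netdev_features_t dev_features = 0;

	if (offload->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		dev_features |= NETIF_F_TSO;
	if (offload->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
		dev_features |= NETIF_F_TSO6;
	if (offload->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
		dev_features |= NETIF_F_TSO_ECN;

	/* Advertise only what the device actually reported. */
	netdev->features |= dev_features;
	netdev->hw_features |= dev_features;
}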
offload 354 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c bool offload)
offload 356 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c if (offload)
offload 364 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c bool offload;
offload 366 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
offload 367 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
offload 195 drivers/net/ethernet/netronome/nfp/bpf/offload.c prog->aux->offload->dev_priv = nfp_prog;
offload 199 drivers/net/ethernet/netronome/nfp/bpf/offload.c nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev);
offload 217 drivers/net/ethernet/netronome/nfp/bpf/offload.c struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
offload 218 drivers/net/ethernet/netronome/nfp/bpf/offload.c struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
offload 223 drivers/net/ethernet/netronome/nfp/bpf/offload.c if (prog->aux->offload->opt_failed)
offload 237 drivers/net/ethernet/netronome/nfp/bpf/offload.c prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
offload 238 drivers/net/ethernet/netronome/nfp/bpf/offload.c prog->aux->offload->jited_image = nfp_prog->prog;
offload 245 drivers/net/ethernet/netronome/nfp/bpf/offload.c struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
offload 484 drivers/net/ethernet/netronome/nfp/bpf/offload.c struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
offload 629 drivers/net/ethernet/netronome/nfp/bpf/verifier.c struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
offload 766 drivers/net/ethernet/netronome/nfp/bpf/verifier.c nfp_prog = env->prog->aux->offload->dev_priv;
offload 789 drivers/net/ethernet/netronome/nfp/bpf/verifier.c nn = netdev_priv(env->prog->aux->offload->netdev);
offload 805 drivers/net/ethernet/netronome/nfp/bpf/verifier.c struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
offload 838 drivers/net/ethernet/netronome/nfp/bpf/verifier.c struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
offload 67 drivers/net/netdevsim/bpf.c state = env->prog->aux->offload->dev_priv;
offload 91 drivers/net/netdevsim/bpf.c if (!prog || !prog->aux->offload)
offload 94 drivers/net/netdevsim/bpf.c state = prog->aux->offload->dev_priv;
offload 142 drivers/net/netdevsim/bpf.c if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
offload 245 drivers/net/netdevsim/bpf.c prog->aux->offload->dev_priv = state;
offload 253 drivers/net/netdevsim/bpf.c bpf_offload_dev_priv(prog->aux->offload->offdev);
offload 263 drivers/net/netdevsim/bpf.c struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;
offload 273 drivers/net/netdevsim/bpf.c state = prog->aux->offload->dev_priv;
offload 291 drivers/net/netdevsim/bpf.c if (bpf->prog && bpf->prog->aux->offload) {
offload 310 drivers/net/netdevsim/bpf.c if (!bpf->prog->aux->offload) {
offload 319 drivers/net/netdevsim/bpf.c state = bpf->prog->aux->offload->dev_priv;
offload 2937 drivers/net/wireless/ath/ath10k/htt_rx.c bool offload;
offload 2953 drivers/net/wireless/ath/ath10k/htt_rx.c offload = !!(resp->rx_in_ord_ind.info &
offload 2959 drivers/net/wireless/ath/ath10k/htt_rx.c vdev_id, peer_id, tid, offload, frag, msdu_count);
offload 2986 drivers/net/wireless/ath/ath10k/htt_rx.c if (offload)
offload 1376 fs/cifs/cifsglob.h bool offload);
offload 441 fs/cifs/file.c bool wait_oplock_handler, bool offload)
offload 509 fs/cifs/file.c if (offload)
offload 394 include/linux/bpf.h struct bpf_prog_offload *offload;
offload 18 include/linux/can/rx-offload.h unsigned int (*mailbox_read)(struct can_rx_offload *offload,
offload 34 include/linux/can/rx-offload.h struct can_rx_offload *offload);
offload 36 include/linux/can/rx-offload.h struct can_rx_offload *offload,
offload 38 include/linux/can/rx-offload.h int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
offload 40 include/linux/can/rx-offload.h int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
offload 41 include/linux/can/rx-offload.h int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
offload 43 include/linux/can/rx-offload.h unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
offload 45 include/linux/can/rx-offload.h int can_rx_offload_queue_tail(struct can_rx_offload *offload,
offload 47 include/linux/can/rx-offload.h void can_rx_offload_reset(struct can_rx_offload *offload);
offload 48 include/linux/can/rx-offload.h void can_rx_offload_del(struct can_rx_offload *offload);
offload 49 include/linux/can/rx-offload.h void can_rx_offload_enable(struct can_rx_offload *offload);
offload 51 include/linux/can/rx-offload.h static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
offload 53 include/linux/can/rx-offload.h napi_schedule(&offload->napi);
offload 56 include/linux/can/rx-offload.h static inline void can_rx_offload_disable(struct can_rx_offload *offload)
offload 58 include/linux/can/rx-offload.h napi_disable(&offload->napi);
offload 3629 include/linux/mlx5/mlx5_ifc.h u8 offload[0x4];
offload 326 include/net/flow_offload.h struct flow_block_offload *offload)
offload 328 include/net/flow_offload.h list_add_tail(&block_cb->list, &offload->cb_list);
offload 332 include/net/flow_offload.h struct flow_block_offload *offload)
offload 334 include/net/flow_offload.h list_move(&block_cb->list, &offload->cb_list);
offload 803 include/net/netfilter/nf_tables.h int (*offload)(struct nft_offload_ctx *ctx,
offload 189 include/net/pkt_sched.h *offload);
offload 190 include/net/pkt_sched.h void taprio_offload_free(struct tc_taprio_qopt_offload *offload);
offload 1546 include/net/xfrm.h int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
offload 970 include/uapi/linux/pkt_sched.h __u8 offload;
offload 83 kernel/bpf/offload.c struct bpf_prog_offload *offload;
offload 93 kernel/bpf/offload.c offload = kzalloc(sizeof(*offload), GFP_USER);
offload 94 kernel/bpf/offload.c if (!offload)
offload 97 kernel/bpf/offload.c offload->prog = prog;
offload 99 kernel/bpf/offload.c offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
offload 101 kernel/bpf/offload.c err = bpf_dev_offload_check(offload->netdev);
offload 106 kernel/bpf/offload.c ondev = bpf_offload_find_netdev(offload->netdev);
offload 111 kernel/bpf/offload.c offload->offdev = ondev->offdev;
offload 112 kernel/bpf/offload.c prog->aux->offload = offload;
offload 113 kernel/bpf/offload.c list_add_tail(&offload->offloads, &ondev->progs);
offload 114 kernel/bpf/offload.c dev_put(offload->netdev);
offload 121 kernel/bpf/offload.c if (offload->netdev)
offload 122 kernel/bpf/offload.c dev_put(offload->netdev);
offload 123 kernel/bpf/offload.c kfree(offload);
offload 129 kernel/bpf/offload.c struct bpf_prog_offload *offload;
offload 133 kernel/bpf/offload.c offload = prog->aux->offload;
offload 134 kernel/bpf/offload.c if (offload) {
offload 135 kernel/bpf/offload.c ret = offload->offdev->ops->prepare(prog);
offload 136 kernel/bpf/offload.c offload->dev_state = !ret;
offload 146 kernel/bpf/offload.c struct bpf_prog_offload *offload;
offload 150 kernel/bpf/offload.c offload = env->prog->aux->offload;
offload 151 kernel/bpf/offload.c if (offload)
offload 152 kernel/bpf/offload.c ret = offload->offdev->ops->insn_hook(env, insn_idx,
offload 161 kernel/bpf/offload.c struct bpf_prog_offload *offload;
offload 165 kernel/bpf/offload.c offload = env->prog->aux->offload;
offload 166 kernel/bpf/offload.c if (offload) {
offload 167 kernel/bpf/offload.c if (offload->offdev->ops->finalize)
offload 168 kernel/bpf/offload.c ret = offload->offdev->ops->finalize(env);
offload 182 kernel/bpf/offload.c struct bpf_prog_offload *offload;
offload 186 kernel/bpf/offload.c offload = env->prog->aux->offload;
offload 187 kernel/bpf/offload.c if (offload) {
offload 188 kernel/bpf/offload.c ops = offload->offdev->ops;
offload 189 kernel/bpf/offload.c if (!offload->opt_failed && ops->replace_insn)
offload 191 kernel/bpf/offload.c offload->opt_failed |= ret;
offload 199 kernel/bpf/offload.c struct bpf_prog_offload *offload;
offload 203 kernel/bpf/offload.c offload = env->prog->aux->offload;
offload 204 kernel/bpf/offload.c if (offload) {
offload 205 kernel/bpf/offload.c if (!offload->opt_failed && offload->offdev->ops->remove_insns)
offload 206 kernel/bpf/offload.c ret = offload->offdev->ops->remove_insns(env, off, cnt);
offload 207 kernel/bpf/offload.c offload->opt_failed |= ret;
offload 214 kernel/bpf/offload.c struct bpf_prog_offload *offload = prog->aux->offload;
offload 216 kernel/bpf/offload.c if (offload->dev_state)
offload 217 kernel/bpf/offload.c offload->offdev->ops->destroy(prog);
offload 222 kernel/bpf/offload.c list_del_init(&offload->offloads);
offload 223 kernel/bpf/offload.c kfree(offload);
offload 224 kernel/bpf/offload.c prog->aux->offload = NULL;
offload 230 kernel/bpf/offload.c if (prog->aux->offload)
offload 237 kernel/bpf/offload.c struct bpf_prog_offload *offload;
offload 241 kernel/bpf/offload.c offload = prog->aux->offload;
offload 242 kernel/bpf/offload.c if (offload)
offload 243 kernel/bpf/offload.c ret = offload->offdev->ops->translate(prog);
offload 278 kernel/bpf/offload.c if (aux->offload) {
offload 279 kernel/bpf/offload.c args->info->ifindex = aux->offload->netdev->ifindex;
offload 280 kernel/bpf/offload.c net = dev_net(aux->offload->netdev);
offload 317 kernel/bpf/offload.c if (!aux->offload) {
offload 323 kernel/bpf/offload.c info->jited_prog_len = aux->offload->jited_len;
offload 327 kernel/bpf/offload.c if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
offload 550 kernel/bpf/offload.c struct bpf_prog_offload *offload;
offload 555 kernel/bpf/offload.c offload = prog->aux->offload;
offload 556 kernel/bpf/offload.c if (!offload)
offload 558 kernel/bpf/offload.c if (offload->netdev == netdev)
offload 561 kernel/bpf/offload.c ondev1 = bpf_offload_find_netdev(offload->netdev);
offload 633 kernel/bpf/offload.c struct bpf_prog_offload *offload, *ptmp;
offload 650 kernel/bpf/offload.c list_for_each_entry(offload, &ondev->progs, offloads)
offload 651 kernel/bpf/offload.c offload->netdev = altdev->netdev;
offload 658 kernel/bpf/offload.c list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
offload 659 kernel/bpf/offload.c __bpf_prog_offload_destroy(offload->prog);
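Nearly every kernel/bpf/offload.c entry above repeats one guard-and-dispatch shape: fetch prog->aux->offload under the offload lock and, only if the program is still bound to a device, call through the device's ops table. A condensed sketch of that shape, modeled on the translate path shown above (bpf_devs_lock is assumed to be the file-local rwsem that offload.c takes around these lookups):

/*
 * Condensed sketch of the pattern in kernel/bpf/offload.c; error codes
 * and surrounding context are elided.
 */
static int demo_prog_offload_translate(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);	/* assumption: file-local rwsem */
	offload = prog->aux->offload;
	if (offload)			/* still bound to a netdev? */
		ret = offload->offdev->ops->translate(prog);
	up_read(&bpf_devs_lock);

	return ret;
}

The same guard explains the netdev-unregister entries (633-659): when a device goes away, each bound program either migrates (offload->netdev = altdev->netdev) or is destroyed, after which the NULL check makes every later dispatch a no-op.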
offload 8388 net/core/dev.c bool offload;
offload 8393 net/core/dev.c offload = flags & XDP_FLAGS_HW_MODE;
offload 8394 net/core/dev.c query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
offload 8409 net/core/dev.c if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) {
offload 8425 net/core/dev.c if (!offload && bpf_prog_is_dev_bound(prog->aux)) {
offload 65 net/netfilter/nf_tables_offload.c if (!expr->ops->offload) {
offload 69 net/netfilter/nf_tables_offload.c err = expr->ops->offload(ctx, flow, expr);
offload 154 net/netfilter/nft_bitwise.c .offload = nft_bitwise_offload,
offload 154 net/netfilter/nft_cmp.c .offload = nft_cmp_offload,
offload 230 net/netfilter/nft_cmp.c .offload = nft_cmp_fast_offload,
offload 77 net/netfilter/nft_dup_netdev.c .offload = nft_dup_netdev_offload,
offload 220 net/netfilter/nft_fwd_netdev.c .offload = nft_fwd_netdev_offload,
offload 175 net/netfilter/nft_immediate.c .offload = nft_immediate_offload,
offload 564 net/netfilter/nft_meta.c .offload = nft_meta_get_offload,
offload 383 net/netfilter/nft_payload.c .offload = nft_payload_offload,
offload 392 net/netfilter/nft_payload.c .offload = nft_payload_offload,
offload 70 net/sched/sch_cbs.c bool offload;
offload 256 net/sched/sch_cbs.c if (!q->offload)
offload 381 net/sched/sch_cbs.c if (!qopt->offload) {
offload 395 net/sched/sch_cbs.c q->offload = qopt->offload;
offload 465 net/sched/sch_cbs.c opt.offload = q->offload;
offload 28 net/sched/sch_etf.c bool offload;
offload 303 net/sched/sch_etf.c if (!q->offload)
offload 326 net/sched/sch_etf.c if (q->offload)
offload 393 net/sched/sch_etf.c q->offload = OFFLOAD_IS_ON(qopt);
offload 478 net/sched/sch_etf.c if (q->offload)
offload 86 net/sched/sch_taprio.c struct tc_taprio_qopt_offload offload;
offload 1121 net/sched/sch_taprio.c return &__offload->offload;
offload 1125 net/sched/sch_taprio.c *offload)
offload 1129 net/sched/sch_taprio.c __offload = container_of(offload, struct __tc_taprio_qopt_offload,
offload 1130 net/sched/sch_taprio.c offload);
offload 1134 net/sched/sch_taprio.c return offload;
offload 1138 net/sched/sch_taprio.c void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
offload 1142 net/sched/sch_taprio.c __offload = container_of(offload, struct __tc_taprio_qopt_offload,
offload 1143 net/sched/sch_taprio.c offload);
offload 1183 net/sched/sch_taprio.c struct tc_taprio_qopt_offload *offload)
offload 1188 net/sched/sch_taprio.c offload->base_time = sched->base_time;
offload 1189 net/sched/sch_taprio.c offload->cycle_time = sched->cycle_time;
offload 1190 net/sched/sch_taprio.c offload->cycle_time_extension = sched->cycle_time_extension;
offload 1193 net/sched/sch_taprio.c struct tc_taprio_sched_entry *e = &offload->entries[i];
offload 1201 net/sched/sch_taprio.c offload->num_entries = i;
offload 1211 net/sched/sch_taprio.c struct tc_taprio_qopt_offload *offload;
offload 1220 net/sched/sch_taprio.c offload = taprio_offload_alloc(sched->num_entries);
offload 1221 net/sched/sch_taprio.c if (!offload) {
offload 1226 net/sched/sch_taprio.c offload->enable = 1;
offload 1227 net/sched/sch_taprio.c taprio_sched_to_offload(q, sched, mqprio, offload);
offload 1229 net/sched/sch_taprio.c err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
offload 1237 net/sched/sch_taprio.c taprio_offload_free(offload);
offload 1247 net/sched/sch_taprio.c struct tc_taprio_qopt_offload *offload;
offload 1256 net/sched/sch_taprio.c offload = taprio_offload_alloc(0);
offload 1257 net/sched/sch_taprio.c if (!offload) {
offload 1262 net/sched/sch_taprio.c offload->enable = 0;
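The sch_taprio.c container_of() entries implement a small idiom worth calling out: the tc_taprio_qopt_offload handed to drivers is embedded in a private wrapper that carries the refcount, so the get/free helpers can recover the wrapper from the public pointer. A paraphrased sketch of that idiom (the struct layout and container_of() usage match the entries above; the function bodies are reconstructed, not copied):

/*
 * Sketch of the refcounted-wrapper idiom from net/sched/sch_taprio.c.
 */
struct __tc_taprio_qopt_offload {
	refcount_t users;			/* hidden from drivers */
	struct tc_taprio_qopt_offload offload;	/* what drivers see */
};

struct tc_taprio_qopt_offload *
taprio_offload_get(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	/* Recover the private wrapper from the public member. */
	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);
	refcount_inc(&__offload->users);

	return offload;
}

void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);
	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);	/* last reference dropped */
}

This is why the sja1105 entries earlier can stash the pointer past the ndo_setup_tc() call: the driver's taprio_offload_get() keeps the schedule alive until its matching taprio_offload_free().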
offload 1264 net/sched/sch_taprio.c err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
offload 1272 net/sched/sch_taprio.c taprio_offload_free(offload);
offload 2443 net/xfrm/xfrm_state.c int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
offload 2493 net/xfrm/xfrm_state.c x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);
offload 224 samples/mic/mpssd/mpssd.c unsigned offload;
offload 246 samples/mic/mpssd/mpssd.c offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_TSO_ECN;
offload 248 samples/mic/mpssd/mpssd.c err = ioctl(fd, TUNSETOFFLOAD, offload);
offload 969 tools/include/uapi/linux/pkt_sched.h __u8 offload;
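Finally, the vector_user.c and mpssd.c entries show the userspace half of TUNSETOFFLOAD: after creating a tap device, the process declares which offloads its reader can handle (note that the flags are passed by value, as both entries show). A minimal sketch; demo_open_tap_with_offloads() is hypothetical and error handling is abbreviated:

/*
 * Userspace sketch of the TUNSETOFFLOAD usage above. IFF_VNET_HDR is
 * requested because checksum/TSO offloads rely on the virtio-net header.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int demo_open_tap_with_offloads(const char *name)
{
	struct ifreq ifr;
	unsigned int offload;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		goto err;

	/* Same flag set the mpssd sample uses. */
	offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_TSO_ECN;
	if (ioctl(fd, TUNSETOFFLOAD, offload) < 0)
		goto err;

	return fd;
err:
	close(fd);
	return -1;
}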