Lines Matching refs:skb (cross-reference hits for the skb identifier in the Linux i2400m WiMAX driver's netdev code; the leading number on each hit is the source line, and the trailing "local"/"argument" tag is the symbol kind)

159 	struct sk_buff *skb;  in i2400m_wake_tx_work()  local
163 skb = i2400m->wake_tx_skb; in i2400m_wake_tx_work()
167 d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb); in i2400m_wake_tx_work()
169 if (skb == NULL) { in i2400m_wake_tx_work()
199 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA); in i2400m_wake_tx_work()
203 kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */ in i2400m_wake_tx_work()
207 ws, i2400m, skb, result); in i2400m_wake_tx_work()
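
The i2400m_wake_tx_work() hits above are the drain side of a stash-and-defer TX pattern: the xmit path parks a single skb in i2400m->wake_tx_skb, and this work function later claims it, hands skb->data/skb->len to i2400m_tx(), and drops the reference that _hard_start_xmit() transferred. A minimal sketch of that drain side, assuming a hypothetical struct my_dev with a deferred_skb slot (none of these names are the driver's real ones):

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct my_dev {
		spinlock_t lock;
		struct sk_buff *deferred_skb;	/* at most one parked skb */
		struct work_struct tx_work;	/* INIT_WORK'ed to my_wake_tx_work() */
	};

	/* Stand-in for the bus-level TX call (i2400m_tx() in the listing) */
	static int my_device_tx(struct my_dev *md, const void *buf, size_t len)
	{
		return 0;	/* stub: a real driver queues buf/len here */
	}

	static void my_wake_tx_work(struct work_struct *ws)
	{
		struct my_dev *md = container_of(ws, struct my_dev, tx_work);
		struct sk_buff *skb;

		spin_lock_bh(&md->lock);
		skb = md->deferred_skb;		/* claim the parked skb, if any */
		md->deferred_skb = NULL;
		spin_unlock_bh(&md->lock);

		if (skb == NULL)		/* cleared by a reset/teardown path */
			return;
		my_device_tx(md, skb->data, skb->len);
		kfree_skb(skb);			/* drop the ref taken when it was parked */
	}
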
220 void i2400m_tx_prep_header(struct sk_buff *skb) in i2400m_tx_prep_header() argument
223 skb_pull(skb, ETH_HLEN); in i2400m_tx_prep_header()
224 pl_hdr = (struct i2400m_pl_data_hdr *) skb_push(skb, sizeof(*pl_hdr)); in i2400m_tx_prep_header()
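
i2400m_tx_prep_header() does an in-place header swap: skb_pull() discards the ETH_HLEN bytes of the fake Ethernet header, then skb_push() reclaims part of that space for the device's payload-data header, so no reallocation is needed as long as the pushed header is no larger than the pulled one. A sketch of the same headroom manipulation (struct my_pl_hdr is a made-up placeholder, not the real i2400m_pl_data_hdr layout):

	#include <linux/if_ether.h>
	#include <linux/skbuff.h>

	struct my_pl_hdr {
		__le32 flags;	/* placeholder field */
	} __packed;

	static void my_tx_prep_header(struct sk_buff *skb)
	{
		struct my_pl_hdr *pl_hdr;

		skb_pull(skb, ETH_HLEN);	/* drop the fake Ethernet header */
		/* sizeof(*pl_hdr) <= ETH_HLEN, so this reuses the freed room */
		pl_hdr = (struct my_pl_hdr *)skb_push(skb, sizeof(*pl_hdr));
		pl_hdr->flags = 0;
	}
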
276 struct sk_buff *skb) in i2400m_net_wake_tx() argument
282 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); in i2400m_net_wake_tx()
286 skb, skb->len); in i2400m_net_wake_tx()
287 d_dump(4, dev, skb->data, skb->len); in i2400m_net_wake_tx()
297 i2400m->wake_tx_skb = skb_get(skb); /* transfer ref count */ in i2400m_net_wake_tx()
298 i2400m_tx_prep_header(skb); in i2400m_net_wake_tx()
311 skb, netif_queue_stopped(net_dev)); in i2400m_net_wake_tx()
314 d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result); in i2400m_net_wake_tx()
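
i2400m_net_wake_tx() is the park side of the hand-off sketched after the i2400m_wake_tx_work() hits: skb_get() takes an extra reference so the buffer outlives this call (that is what the "transfer ref count" comment means), the queue is stopped so only one skb is ever parked, and the work item is scheduled once the device wake has been kicked off. Continuing the sketch with the same hypothetical names:

	#include <linux/netdevice.h>

	static int my_net_wake_tx(struct my_dev *md, struct net_device *net_dev,
				  struct sk_buff *skb)
	{
		spin_lock_bh(&md->lock);
		if (md->deferred_skb == NULL) {
			netif_stop_queue(net_dev);	 /* one parked skb at a time */
			md->deferred_skb = skb_get(skb); /* hold a ref past return */
			my_tx_prep_header(skb);
			schedule_work(&md->tx_work);	 /* drained by my_wake_tx_work() */
		}
		spin_unlock_bh(&md->lock);
		return 0;
	}
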
329 struct sk_buff *skb) in i2400m_net_tx() argument
335 i2400m, net_dev, skb); in i2400m_net_tx()
338 i2400m_tx_prep_header(skb); in i2400m_net_tx()
340 skb, skb->len); in i2400m_net_tx()
341 d_dump(4, dev, skb->data, skb->len); in i2400m_net_tx()
342 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA); in i2400m_net_tx()
344 i2400m, net_dev, skb, result); in i2400m_net_tx()
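
i2400m_net_tx() is the direct path used when the device is already awake: prep the header, then push the frame straight to the TX engine. The matching piece of the running sketch:

	static int my_net_tx(struct my_dev *md, struct sk_buff *skb)
	{
		my_tx_prep_header(skb);	/* swap Ethernet header for payload header */
		return my_device_tx(md, skb->data, skb->len);
	}
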
368 netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb, in i2400m_hard_start_xmit() argument
375 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); in i2400m_hard_start_xmit()
377 if (skb_cow_head(skb, 0)) in i2400m_hard_start_xmit()
381 result = i2400m_net_wake_tx(i2400m, net_dev, skb); in i2400m_hard_start_xmit()
383 result = i2400m_net_tx(i2400m, net_dev, skb); in i2400m_hard_start_xmit()
389 net_dev->stats.tx_bytes += skb->len; in i2400m_hard_start_xmit()
391 dev_kfree_skb(skb); in i2400m_hard_start_xmit()
392 d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result); in i2400m_hard_start_xmit()
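
i2400m_hard_start_xmit() ties the two paths together as the driver's ndo_start_xmit hook: skb_cow_head() makes the header bytes private and writable before i2400m_tx_prep_header() rewrites them, the frame is dispatched to the wake or direct path, the stats are updated, and the skb is freed here because this hook consumed it either way. A sketch of that shape (my_device_is_asleep() is a stubbed power-state query standing in for the driver's state check):

	static bool my_device_is_asleep(struct my_dev *md)
	{
		return false;	/* stub: query the real device power state */
	}

	static netdev_tx_t my_hard_start_xmit(struct sk_buff *skb,
					      struct net_device *net_dev)
	{
		struct my_dev *md = netdev_priv(net_dev);
		int result;

		/* The header is rewritten below, so it must be writable */
		result = skb_cow_head(skb, 0);
		if (result == 0) {
			if (my_device_is_asleep(md))
				result = my_net_wake_tx(md, net_dev, skb);
			else
				result = my_net_tx(md, skb);
		}
		if (result < 0) {
			net_dev->stats.tx_dropped++;
		} else {
			net_dev->stats.tx_packets++;
			net_dev->stats.tx_bytes += skb->len;
		}
		dev_kfree_skb(skb);	/* consumed on success and failure alike */
		return NETDEV_TX_OK;
	}
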
492 struct sk_buff *skb; in i2400m_net_rx() local
497 skb = skb_get(skb_rx); in i2400m_net_rx()
498 d_printf(2, dev, "RX: reusing first payload skb %p\n", skb); in i2400m_net_rx()
499 skb_pull(skb, buf - (void *) skb->data); in i2400m_net_rx()
500 skb_trim(skb, (void *) skb_end_pointer(skb) - buf); in i2400m_net_rx()
504 skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL); in i2400m_net_rx()
505 if (skb == NULL) { in i2400m_net_rx()
510 memcpy(skb_put(skb, buf_len), buf, buf_len); in i2400m_net_rx()
513 skb->data - ETH_HLEN, in i2400m_net_rx()
515 skb_set_mac_header(skb, -ETH_HLEN); in i2400m_net_rx()
516 skb->dev = i2400m->wimax_dev.net_dev; in i2400m_net_rx()
517 skb->protocol = htons(ETH_P_IP); in i2400m_net_rx()
523 netif_rx_ni(skb); /* see notes in function header */ in i2400m_net_rx()
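
i2400m_net_rx() makes a reuse-versus-copy decision for a payload embedded in a larger receive buffer: the first payload shares the original skb via skb_get() plus skb_pull()/skb_trim(), while later payloads get a freshly allocated skb and a copy. A sketch of just that decision (hypothetical helper; skb_put_data() is the modern spelling of the memcpy(skb_put(...), ...) idiom in the listing, and the trim is simplified to use buf_len directly):

	static struct sk_buff *my_net_rx_skb(struct net_device *net_dev,
					     struct sk_buff *skb_rx,
					     bool first_payload,
					     const void *buf, size_t buf_len)
	{
		struct sk_buff *skb;

		if (first_payload) {
			skb = skb_get(skb_rx);	/* share the original buffer */
			skb_pull(skb, buf - (void *)skb->data);	/* cut the front */
			skb_trim(skb, buf_len);			/* cut the tail */
		} else {
			skb = __netdev_alloc_skb(net_dev, buf_len, GFP_KERNEL);
			if (skb == NULL)
				return NULL;	/* caller bumps rx_dropped */
			skb_put_data(skb, buf, buf_len);
		}
		return skb;
	}
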
552 void i2400m_net_erx(struct i2400m *i2400m, struct sk_buff *skb, in i2400m_net_erx() argument
560 i2400m, skb, skb->len, cs); in i2400m_net_erx()
566 skb->data - ETH_HLEN, in i2400m_net_erx()
568 skb_set_mac_header(skb, -ETH_HLEN); in i2400m_net_erx()
569 skb->dev = i2400m->wimax_dev.net_dev; in i2400m_net_erx()
570 skb->protocol = htons(ETH_P_IP); in i2400m_net_erx()
572 net_dev->stats.rx_bytes += skb->len; in i2400m_net_erx()
580 skb->len); in i2400m_net_erx()
581 d_dump(4, dev, skb->data, skb->len); in i2400m_net_erx()
582 netif_rx_ni(skb); /* see notes in function header */ in i2400m_net_erx()
585 i2400m, skb, skb->len, cs); in i2400m_net_erx()
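
Both i2400m_net_rx() and i2400m_net_erx() finish the same way: a fake Ethernet header is written into the headroom just below skb->data, skb_set_mac_header(skb, -ETH_HLEN) points the MAC header at it, dev/protocol and the RX stats are stamped, and the skb is handed up with netif_rx_ni(), the process-context variant of netif_rx() used in kernels of this vintage. A sketch of that delivery tail, with my_net_deliver() also inlining roughly what i2400m_rx_fake_eth_header() does (assumed, not verified against the helper's exact code):

	#include <linux/etherdevice.h>

	/* Assumes at least ETH_HLEN bytes of headroom below skb->data */
	static void my_net_deliver(struct net_device *net_dev, struct sk_buff *skb,
				   const u8 *src_mac, __be16 protocol)
	{
		struct ethhdr *eth = (struct ethhdr *)(skb->data - ETH_HLEN);

		memcpy(eth->h_dest, net_dev->dev_addr, ETH_ALEN);
		memcpy(eth->h_source, src_mac, ETH_ALEN);
		eth->h_proto = protocol;		/* e.g. htons(ETH_P_IP) */

		skb_set_mac_header(skb, -ETH_HLEN);	/* header lives in headroom */
		skb->dev = net_dev;
		skb->protocol = protocol;
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += skb->len;
		netif_rx_ni(skb);	/* we are in process context here */
	}
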