
Searched refs:segs (Results 1 – 69 of 69) sorted by relevance

/linux-4.1.27/net/xfrm/
xfrm_output.c
154 struct sk_buff *segs; in xfrm_output_gso() local
158 segs = skb_gso_segment(skb, 0); in xfrm_output_gso()
160 if (IS_ERR(segs)) in xfrm_output_gso()
161 return PTR_ERR(segs); in xfrm_output_gso()
162 if (segs == NULL) in xfrm_output_gso()
166 struct sk_buff *nskb = segs->next; in xfrm_output_gso()
169 segs->next = NULL; in xfrm_output_gso()
170 err = xfrm_output2(sk, segs); in xfrm_output_gso()
177 segs = nskb; in xfrm_output_gso()
178 } while (segs); in xfrm_output_gso()
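
Note: the loop above is the canonical GSO consume idiom, repeated below in ip_output.c, sch_tbf.c, macvtap.c, nfnetlink_queue_core.c, sunvnet.c, r8152.c, r8169.c and tg3.c: segment once, then walk the resulting singly linked list, detaching each buffer before handing it to a consumer that takes ownership. A minimal userspace sketch of the idiom; struct buf and consume() are illustrative stand-ins for struct sk_buff and xfrm_output2(), not kernel code:

    #include <stdio.h>
    #include <stdlib.h>

    struct buf {                        /* stand-in for struct sk_buff */
        struct buf *next;
        int id;
    };

    static int consume(struct buf *b)   /* stand-in for xfrm_output2() */
    {
        printf("consumed %d\n", b->id);
        free(b);                        /* the consumer owns the buffer */
        return 0;
    }

    int main(void)
    {
        struct buf *segs = NULL;

        /* build a three-element list, as skb_gso_segment() would return */
        for (int i = 3; i >= 1; i--) {
            struct buf *b = malloc(sizeof(*b));
            b->id = i;
            b->next = segs;
            segs = b;
        }

        do {
            struct buf *nskb = segs->next;  /* save the successor,    */
            segs->next = NULL;              /* detach the head,       */
            int err = consume(segs);        /* then hand it off       */
            if (err)
                break;                      /* kernel would free the
                                             * rest via kfree_skb_list */
            segs = nskb;
        } while (segs);
        return 0;
    }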
/linux-4.1.27/net/ipv4/
udp_offload.c
34 struct sk_buff *segs = ERR_PTR(-EINVAL); in __skb_udp_tunnel_segment() local
68 segs = gso_inner_segment(skb, enc_features); in __skb_udp_tunnel_segment()
69 if (IS_ERR_OR_NULL(segs)) { in __skb_udp_tunnel_segment()
77 skb = segs; in __skb_udp_tunnel_segment()
137 return segs; in __skb_udp_tunnel_segment()
147 struct sk_buff *segs = ERR_PTR(-EINVAL); in skb_udp_tunnel_segment() local
169 segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment, in skb_udp_tunnel_segment()
175 return segs; in skb_udp_tunnel_segment()
181 struct sk_buff *segs = ERR_PTR(-EINVAL); in udp4_ufo_fragment() local
190 segs = skb_udp_tunnel_segment(skb, features, false); in udp4_ufo_fragment()
[all …]
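
Note: almost every gso_segment implementation in this listing initializes segs = ERR_PTR(-EINVAL), so an early exit returns a well-formed error pointer that callers test with IS_ERR() or IS_ERR_OR_NULL(). A userspace model of the convention (the kernel's real definitions live in include/linux/err.h):

    #include <stdio.h>

    /* errno values are encoded at the very top of the pointer range,
     * so one return value can carry pointer-or-error */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
    static inline int IS_ERR_OR_NULL(const void *ptr)
    {
        return !ptr || IS_ERR(ptr);
    }

    int main(void)
    {
        void *segs = ERR_PTR(-22);              /* -EINVAL */
        if (IS_ERR_OR_NULL(segs))
            printf("error path: %ld\n", PTR_ERR(segs));
        return 0;
    }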
gre_offload.c
21 struct sk_buff *segs = ERR_PTR(-EINVAL); in gre_gso_segment() local
73 segs = skb_mac_gso_segment(skb, enc_features); in gre_gso_segment()
74 if (IS_ERR_OR_NULL(segs)) { in gre_gso_segment()
79 skb = segs; in gre_gso_segment()
91 kfree_skb_list(segs); in gre_gso_segment()
92 segs = ERR_PTR(err); in gre_gso_segment()
116 return segs; in gre_gso_segment()
tcp_offload.c
57 struct sk_buff *segs = ERR_PTR(-EINVAL); in tcp_gso_segment() local
106 segs = NULL; in tcp_gso_segment()
115 segs = skb_segment(skb, features); in tcp_gso_segment()
116 if (IS_ERR(segs)) in tcp_gso_segment()
120 segs->ooo_okay = ooo_okay; in tcp_gso_segment()
124 skb = segs; in tcp_gso_segment()
129 tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); in tcp_gso_segment()
175 return segs; in tcp_gso_segment()
ip_output.c
222 struct sk_buff *segs; in ip_finish_output_gso() local
239 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); in ip_finish_output_gso()
240 if (IS_ERR_OR_NULL(segs)) { in ip_finish_output_gso()
248 struct sk_buff *nskb = segs->next; in ip_finish_output_gso()
251 segs->next = NULL; in ip_finish_output_gso()
252 err = ip_fragment(sk, segs, ip_finish_output2); in ip_finish_output_gso()
256 segs = nskb; in ip_finish_output_gso()
257 } while (segs); in ip_finish_output_gso()
af_inet.c
1205 struct sk_buff *segs = ERR_PTR(-EINVAL); in inet_gso_segment() local
1256 segs = ERR_PTR(-EPROTONOSUPPORT); in inet_gso_segment()
1266 segs = ops->callbacks.gso_segment(skb, features); in inet_gso_segment()
1268 if (IS_ERR_OR_NULL(segs)) in inet_gso_segment()
1271 skb = segs; in inet_gso_segment()
1291 return segs; in inet_gso_segment()
route.c
472 u32 ip_idents_reserve(u32 hash, int segs) in ip_idents_reserve() argument
482 return atomic_add_return(segs + delta, &bucket->id) - segs; in ip_idents_reserve()
486 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs) in __ip_select_ident() argument
497 id = ip_idents_reserve(hash, segs); in __ip_select_ident()
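
Note: ip_idents_reserve() hands out a block of `segs` consecutive IP IDs with one atomic add; atomic_add_return(segs + delta, ...) - segs is the first ID of the block, and delta only randomizes the spacing between callers. ip_select_ident_segs() in include/net/ip.h below does the same per connected socket with inet_id += segs. A sketch of the reservation arithmetic, ignoring the hash bucket and delta:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint bucket_id;

    /* reserve `segs` consecutive IDs and return the first one; the
     * fetch-add result (the value before the add) equals the kernel's
     * atomic_add_return(segs, ...) - segs */
    static unsigned int idents_reserve(unsigned int segs)
    {
        return atomic_fetch_add(&bucket_id, segs);
    }

    int main(void)
    {
        unsigned int a = idents_reserve(3);  /* gets IDs a..a+2 */
        unsigned int b = idents_reserve(2);  /* starts at a+3   */
        printf("%u %u\n", a, b);
        return 0;
    }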
tcp_output.c
1546 u32 bytes, segs; in tcp_tso_autosize() local
1556 segs = max_t(u32, bytes / mss_now, sysctl_tcp_min_tso_segs); in tcp_tso_autosize()
1558 return min_t(u32, segs, sk->sk_gso_max_segs); in tcp_tso_autosize()
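
Note: tcp_tso_autosize() sizes a TSO burst from a pacing byte budget and clamps it between the sysctl_tcp_min_tso_segs floor and the device's sk_gso_max_segs ceiling. A self-contained model of the clamp; the parameter values are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define max_t(t, a, b) ((t)(a) > (t)(b) ? (t)(a) : (t)(b))
    #define min_t(t, a, b) ((t)(a) < (t)(b) ? (t)(a) : (t)(b))

    /* model of the tcp_tso_autosize() clamp: size a burst from the
     * byte budget, but never below the sysctl floor or above the
     * device's gso_max_segs ceiling */
    static uint32_t tso_autosize(uint32_t bytes, uint32_t mss_now,
                                 uint32_t min_tso_segs,
                                 uint32_t gso_max_segs)
    {
        uint32_t segs = max_t(uint32_t, bytes / mss_now, min_tso_segs);
        return min_t(uint32_t, segs, gso_max_segs);
    }

    int main(void)
    {
        /* 64000-byte budget, 1448-byte MSS -> 44 segments */
        printf("%u\n", tso_autosize(64000, 1448, 2, 65535));
        return 0;
    }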
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_keys.c
167 while (off >= mr->map[m]->segs[n].length) { in ipath_lkey_ok()
168 off -= mr->map[m]->segs[n].length; in ipath_lkey_ok()
176 isge->vaddr = mr->map[m]->segs[n].vaddr + off; in ipath_lkey_ok()
177 isge->length = mr->map[m]->segs[n].length - off; in ipath_lkey_ok()
249 while (off >= mr->map[m]->segs[n].length) { in ipath_rkey_ok()
250 off -= mr->map[m]->segs[n].length; in ipath_rkey_ok()
258 sge->vaddr = mr->map[m]->segs[n].vaddr + off; in ipath_rkey_ok()
259 sge->length = mr->map[m]->segs[n].length - off; in ipath_rkey_ok()
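
Note: both key checks above resolve a byte offset against a two-level segment table: whole segments are consumed until the offset falls inside segs[n], with n wrapping into the next map page at IPATH_SEGSZ. A sketch of the walk; SEGSZ, the types and the test data are stand-ins, with no bounds checking:

    #include <stdio.h>

    #define SEGSZ 2   /* stand-in for IPATH_SEGSZ */

    struct seg { char *vaddr; unsigned int length; };
    struct map { struct seg segs[SEGSZ]; };

    /* consume whole segments until `off` lands inside one, wrapping
     * the per-map index n the way ipath_lkey_ok()/ipath_rkey_ok() do */
    static struct seg *resolve(struct map **maps, unsigned int off,
                               unsigned int *rem)
    {
        unsigned int m = 0, n = 0;

        while (off >= maps[m]->segs[n].length) {
            off -= maps[m]->segs[n].length;
            if (++n >= SEGSZ) {     /* next map page */
                m++;
                n = 0;
            }
        }
        *rem = off;
        return &maps[m]->segs[n];
    }

    int main(void)
    {
        static char mem[1024];
        struct map m0 = { { { mem, 100 }, { mem + 100, 100 } } };
        struct map m1 = { { { mem + 200, 100 }, { mem + 300, 100 } } };
        struct map *maps[] = { &m0, &m1 };
        unsigned int rem;

        /* offset 250 lands 50 bytes into the third segment */
        struct seg *s = resolve(maps, 250, &rem);
        printf("vaddr=%p len=%u\n", (void *)(s->vaddr + rem),
               s->length - rem);
        return 0;
    }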
ipath_mr.c
158 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr; in ipath_reg_phys_mr()
159 mr->mr.map[m]->segs[n].length = buffer_list[i].size; in ipath_reg_phys_mr()
232 mr->mr.map[m]->segs[n].vaddr = vaddr; in ipath_reg_user_mr()
233 mr->mr.map[m]->segs[n].length = umem->page_size; in ipath_reg_user_mr()
371 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in ipath_map_phys_fmr()
372 fmr->mr.map[m]->segs[n].length = ps; in ipath_map_phys_fmr()
ipath_verbs.c
197 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_copy_sge()
199 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_copy_sge()
236 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_skip_sge()
238 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_skip_sge()
283 sge.mr->map[sge.m]->segs[sge.n].vaddr; in ipath_count_sge()
285 sge.mr->map[sge.m]->segs[sge.n].length; in ipath_count_sge()
322 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_copy_from_sge()
324 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_copy_from_sge()
796 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; in update_sge()
797 sge->length = sge->mr->map[sge->m]->segs[sge->n].length; in update_sge()
ipath_ud.c
207 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_ud_loopback()
209 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_ud_loopback()
ipath_ruc.c
422 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_ruc_loopback()
424 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_ruc_loopback()
ipath_sdma.c
772 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_sdma_verbs_send()
774 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_sdma_verbs_send()
ipath_verbs.h
238 struct ipath_seg segs[IPATH_SEGSZ]; member
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_keys.c
217 while (off >= mr->map[m]->segs[n].length) { in qib_lkey_ok()
218 off -= mr->map[m]->segs[n].length; in qib_lkey_ok()
227 isge->vaddr = mr->map[m]->segs[n].vaddr + off; in qib_lkey_ok()
228 isge->length = mr->map[m]->segs[n].length - off; in qib_lkey_ok()
316 while (off >= mr->map[m]->segs[n].length) { in qib_rkey_ok()
317 off -= mr->map[m]->segs[n].length; in qib_rkey_ok()
326 sge->vaddr = mr->map[m]->segs[n].vaddr + off; in qib_rkey_ok()
327 sge->length = mr->map[m]->segs[n].length - off; in qib_rkey_ok()
379 mr->map[m]->segs[n].vaddr = (void *) page_list[i]; in qib_fast_reg_mr()
380 mr->map[m]->segs[n].length = ps; in qib_fast_reg_mr()
qib_mr.c
203 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr; in qib_reg_phys_mr()
204 mr->mr.map[m]->segs[n].length = buffer_list[i].size; in qib_reg_phys_mr()
277 mr->mr.map[m]->segs[n].vaddr = vaddr; in qib_reg_user_mr()
278 mr->mr.map[m]->segs[n].length = umem->page_size; in qib_reg_user_mr()
463 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in qib_map_phys_fmr()
464 fmr->mr.map[m]->segs[n].length = ps; in qib_map_phys_fmr()
qib_verbs.c
197 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_copy_sge()
199 sge->mr->map[sge->m]->segs[sge->n].length; in qib_copy_sge()
238 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_skip_sge()
240 sge->mr->map[sge->m]->segs[sge->n].length; in qib_skip_sge()
285 sge.mr->map[sge.m]->segs[sge.n].vaddr; in qib_count_sge()
287 sge.mr->map[sge.m]->segs[sge.n].length; in qib_count_sge()
323 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_copy_from_sge()
325 sge->mr->map[sge->m]->segs[sge->n].length; in qib_copy_from_sge()
751 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; in update_sge()
752 sge->length = sge->mr->map[sge->m]->segs[sge->n].length; in update_sge()
qib_ud.c
198 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_ud_loopback()
200 sge->mr->map[sge->m]->segs[sge->n].length; in qib_ud_loopback()
qib_sdma.c
630 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_sdma_verbs_send()
632 sge->mr->map[sge->m]->segs[sge->n].length; in qib_sdma_verbs_send()
qib_ruc.c
538 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_ruc_loopback()
540 sge->mr->map[sge->m]->segs[sge->n].length; in qib_ruc_loopback()
qib_verbs.h
294 struct qib_seg segs[QIB_SEGSZ]; member
/linux-4.1.27/net/ipv6/
udp_offload.c
23 struct sk_buff *segs = ERR_PTR(-EINVAL); in udp6_ufo_fragment() local
59 segs = NULL; in udp6_ufo_fragment()
65 segs = skb_udp_tunnel_segment(skb, features, true); in udp6_ufo_fragment()
122 segs = skb_segment(skb, features); in udp6_ufo_fragment()
126 return segs; in udp6_ufo_fragment()
ip6_offload.c
60 struct sk_buff *segs = ERR_PTR(-EINVAL); in ipv6_gso_segment() local
99 segs = ERR_PTR(-EPROTONOSUPPORT); in ipv6_gso_segment()
112 segs = ops->callbacks.gso_segment(skb, features); in ipv6_gso_segment()
115 if (IS_ERR(segs)) in ipv6_gso_segment()
118 for (skb = segs; skb; skb = skb->next) { in ipv6_gso_segment()
137 return segs; in ipv6_gso_segment()
/linux-4.1.27/net/mpls/
mpls_gso.c
25 struct sk_buff *segs = ERR_PTR(-EINVAL); in mpls_gso_segment() local
48 segs = skb_mac_gso_segment(skb, mpls_features); in mpls_gso_segment()
60 return segs; in mpls_gso_segment()
/linux-4.1.27/net/sched/
sch_tbf.c
161 struct sk_buff *segs, *nskb; in tbf_segment() local
165 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); in tbf_segment()
167 if (IS_ERR_OR_NULL(segs)) in tbf_segment()
171 while (segs) { in tbf_segment()
172 nskb = segs->next; in tbf_segment()
173 segs->next = NULL; in tbf_segment()
174 qdisc_skb_cb(segs)->pkt_len = segs->len; in tbf_segment()
175 ret = qdisc_enqueue(segs, q->qdisc); in tbf_segment()
182 segs = nskb; in tbf_segment()
/linux-4.1.27/fs/
binfmt_elf_fdpic.c
766 seg = loadmap->segs; in elf_fdpic_map_file()
787 seg = loadmap->segs; in elf_fdpic_map_file()
812 seg = loadmap->segs; in elf_fdpic_map_file()
833 seg = loadmap->segs; in elf_fdpic_map_file()
865 mseg = loadmap->segs; in elf_fdpic_map_file()
893 seg = loadmap->segs; in elf_fdpic_map_file()
924 seg = params->loadmap->segs; in elf_fdpic_map_file_constdisp_on_uclinux()
1015 seg = params->loadmap->segs; in elf_fdpic_map_file_by_direct_mmap()
1274 static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs) in fill_elf_fdpic_header() argument
1292 elf->e_phnum = segs; in fill_elf_fdpic_header()
[all …]
binfmt_elf.c
1343 static void fill_elf_header(struct elfhdr *elf, int segs, in fill_elf_header() argument
1361 elf->e_phnum = segs; in fill_elf_header()
2092 elf_addr_t e_shoff, int segs) in fill_extnum_info() argument
2104 shdr4extnum->sh_info = segs; in fill_extnum_info()
2118 int segs, i; in elf_core_dump() local
2150 segs = current->mm->map_count; in elf_core_dump()
2151 segs += elf_core_extra_phdrs(); in elf_core_dump()
2155 segs++; in elf_core_dump()
2158 segs++; in elf_core_dump()
2163 e_phnum = segs > PN_XNUM ? PN_XNUM : segs; in elf_core_dump()
[all …]
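
Note: elf_core_dump() counts one program header per VMA plus elf_core_extra_phdrs(); since e_phnum is a 16-bit field, counts above PN_XNUM (0xffff) are clamped, and the true total travels in the sh_info of the extra section header that fill_extnum_info() writes. A sketch of the clamp, using PN_XNUM and the ELF types from <elf.h>:

    #include <elf.h>     /* PN_XNUM, Elf64_Ehdr, Elf64_Shdr */
    #include <stdio.h>

    int main(void)
    {
        int segs = 70000;                 /* e.g. map_count + extras */
        Elf64_Ehdr elf = {0};
        Elf64_Shdr shdr4extnum = {0};

        /* e_phnum is only 16 bits wide, so overflow goes through the
         * ELF extended-numbering escape hatch, as elf_core_dump() does */
        elf.e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
        if (elf.e_phnum == PN_XNUM)
            shdr4extnum.sh_info = segs;   /* real count lives here */

        printf("e_phnum=%#x real=%u\n", elf.e_phnum, shdr4extnum.sh_info);
        return 0;
    }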
/linux-4.1.27/drivers/block/
xen-blkfront.c
945 int i, j, segs; in blkif_free() local
994 segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ? in blkif_free()
997 for (j = 0; j < segs; j++) { in blkif_free()
1012 for (j = 0; j < INDIRECT_GREFS(segs); j++) { in blkif_free()
1468 unsigned int segs, offset; in blkif_recover() local
1492 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; in blkif_recover()
1493 blk_queue_max_segments(info->rq, segs); in blkif_recover()
1557 BUG_ON(req->nr_phys_segments > segs); in blkif_recover()
1564 if (bio_segments(bio) > segs) { in blkif_recover()
1569 pending = (bio_segments(bio) + segs - 1) / segs; in blkif_recover()
[all …]
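
Note: the split count in blkif_recover() is plain ceiling division: (bio_segments(bio) + segs - 1) / segs clone bios, each carrying at most `segs` segments; the kernel usually spells this DIV_ROUND_UP. For example, a 10-segment bio with segs = 4 needs 3 clones:

    #include <stdio.h>

    /* ceiling division, as open-coded in blkif_recover() */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* 10 bio segments, at most 4 per clone -> 3 clones */
        printf("%d\n", DIV_ROUND_UP(10, 4));
        return 0;
    }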
/linux-4.1.27/drivers/usb/wusbcore/
wa-xfer.c
157 u8 segs, segs_submitted, segs_done; member
191 for (cnt = 0; cnt < xfer->segs; cnt++) { in wa_xfer_destroy()
349 for (cnt = 0; cnt < xfer->segs; cnt++) { in __wa_xfer_is_done()
363 && cnt != xfer->segs-1) in __wa_xfer_is_done()
469 while (seg_index < xfer->segs) { in __wa_xfer_abort_cb()
640 xfer->segs = 0; in __wa_xfer_setup_sizes()
649 ++xfer->segs; in __wa_xfer_setup_sizes()
652 xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, in __wa_xfer_setup_sizes()
654 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) in __wa_xfer_setup_sizes()
655 xfer->segs = 1; in __wa_xfer_setup_sizes()
[all …]
/linux-4.1.27/include/uapi/linux/
elf-fdpic.h
29 struct elf32_fdpic_loadseg segs[]; member
/linux-4.1.27/include/net/
ip.h
322 u32 ip_idents_reserve(u32 hash, int segs);
323 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
326 struct sock *sk, int segs) in ip_select_ident_segs() argument
338 inet_sk(sk)->inet_id += segs; in ip_select_ident_segs()
343 __ip_select_ident(net, iph, segs); in ip_select_ident_segs()
tcp.h
758 static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs) in tcp_skb_pcount_set() argument
760 TCP_SKB_CB(skb)->tcp_gso_segs = segs; in tcp_skb_pcount_set()
763 static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs) in tcp_skb_pcount_add() argument
765 TCP_SKB_CB(skb)->tcp_gso_segs += segs; in tcp_skb_pcount_add()
/linux-4.1.27/drivers/net/
macvtap.c
317 struct sk_buff *segs = __skb_gso_segment(skb, features, false); in macvtap_handle_frame() local
319 if (IS_ERR(segs)) in macvtap_handle_frame()
322 if (!segs) { in macvtap_handle_frame()
328 while (segs) { in macvtap_handle_frame()
329 struct sk_buff *nskb = segs->next; in macvtap_handle_frame()
331 segs->next = NULL; in macvtap_handle_frame()
332 skb_queue_tail(&q->sk.sk_receive_queue, segs); in macvtap_handle_frame()
333 segs = nskb; in macvtap_handle_frame()
/linux-4.1.27/net/netfilter/
nfnetlink_queue_core.c
642 struct sk_buff *skb, *segs; in nfqnl_enqueue_packet() local
671 segs = skb_gso_segment(skb, 0); in nfqnl_enqueue_packet()
676 if (IS_ERR_OR_NULL(segs)) in nfqnl_enqueue_packet()
681 struct sk_buff *nskb = segs->next; in nfqnl_enqueue_packet()
684 segs, entry); in nfqnl_enqueue_packet()
688 kfree_skb(segs); in nfqnl_enqueue_packet()
689 segs = nskb; in nfqnl_enqueue_packet()
690 } while (segs); in nfqnl_enqueue_packet()
/linux-4.1.27/arch/powerpc/platforms/powernv/
pci-ioda.c
1768 unsigned int segs) in pnv_pci_ioda_setup_dma_pe() argument
1789 (base << 28), ((base + segs) << 28) - 1); in pnv_pci_ioda_setup_dma_pe()
1797 get_order(TCE32_TABLE_SIZE * segs)); in pnv_pci_ioda_setup_dma_pe()
1803 memset(addr, 0, TCE32_TABLE_SIZE * segs); in pnv_pci_ioda_setup_dma_pe()
1806 for (i = 0; i < segs; i++) { in pnv_pci_ioda_setup_dma_pe()
1821 pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs, in pnv_pci_ioda_setup_dma_pe()
1860 __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); in pnv_pci_ioda_setup_dma_pe()
2008 unsigned int residual, remaining, segs, tw, base; in pnv_ioda_setup_dma() local
2041 segs = 1; in pnv_ioda_setup_dma()
2043 segs += ((pe->dma_weight * residual) + (tw / 2)) / tw; in pnv_ioda_setup_dma()
[all …]
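
Note: pnv_ioda_setup_dma() distributes leftover TCE segments by DMA weight using round-to-nearest integer division: adding tw / 2 before dividing by tw rounds the proportional share instead of truncating it. A sketch of the arithmetic; the function and parameter names are illustrative:

    #include <stdio.h>

    /* rounded (not truncated) integer division, as in the
     * segs += ((pe->dma_weight * residual) + (tw / 2)) / tw hit above */
    static unsigned int rounded_share(unsigned int weight,
                                      unsigned int residual,
                                      unsigned int total_weight)
    {
        return ((weight * residual) + (total_weight / 2)) / total_weight;
    }

    int main(void)
    {
        /* weight 3 of 7 over 10 residual segments: 30/7 = 4.29 -> 4 */
        printf("%u\n", rounded_share(3, 10, 7));
        return 0;
    }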
/linux-4.1.27/arch/mips/include/asm/octeon/
cvmx-pko.h
242 uint64_t segs:6; member
247 uint64_t segs:6;
cvmx-pko-defs.h
157 uint64_t segs:6; member
161 uint64_t segs:6;
236 uint64_t segs:6; member
240 uint64_t segs:6;
370 uint64_t segs:6; member
374 uint64_t segs:6;
/linux-4.1.27/include/linux/
bio.h
258 unsigned segs = 0; in bio_segments() local
274 segs++; in bio_segments()
276 return segs; in bio_segments()
blkdev.h
1519 unsigned int segs) in blk_queue_max_integrity_segments() argument
1521 q->limits.max_integrity_segments = segs; in blk_queue_max_integrity_segments()
1573 unsigned int segs) in blk_queue_max_integrity_segments() argument
skbuff.h
766 void kfree_skb_list(struct sk_buff *segs);
/linux-4.1.27/net/openvswitch/
datapath.c
339 struct sk_buff *segs, *nskb; in queue_gso_packets() local
343 segs = __skb_gso_segment(skb, NETIF_F_SG, false); in queue_gso_packets()
344 if (IS_ERR(segs)) in queue_gso_packets()
345 return PTR_ERR(segs); in queue_gso_packets()
346 if (segs == NULL) in queue_gso_packets()
359 skb = segs; in queue_gso_packets()
361 if (gso_type & SKB_GSO_UDP && skb != segs) in queue_gso_packets()
371 skb = segs; in queue_gso_packets()
/linux-4.1.27/arch/x86/kvm/
vmx.c
515 struct kvm_segment segs[8]; member
3305 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_pmode()
3306 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_pmode()
3307 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_pmode()
3308 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_pmode()
3309 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_pmode()
3310 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_pmode()
3316 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_pmode()
3328 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_pmode()
3329 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_pmode()
[all …]
/linux-4.1.27/drivers/net/ethernet/sun/
sunvnet.c
1151 struct sk_buff *segs; in vnet_handle_offloads() local
1202 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); in vnet_handle_offloads()
1203 if (IS_ERR(segs)) in vnet_handle_offloads()
1210 while (segs) { in vnet_handle_offloads()
1211 struct sk_buff *curr = segs; in vnet_handle_offloads()
1213 segs = segs->next; in vnet_handle_offloads()
/linux-4.1.27/drivers/media/dvb-frontends/
mb86a20s.c
1447 const struct linear_segments *segs; in mb86a20s_get_blk_error_layer_CNR() local
1499 segs = cnr_qpsk_table; in mb86a20s_get_blk_error_layer_CNR()
1503 segs = cnr_16qam_table; in mb86a20s_get_blk_error_layer_CNR()
1508 segs = cnr_64qam_table; in mb86a20s_get_blk_error_layer_CNR()
1512 cnr = interpolate_value(mer, segs, segs_len); in mb86a20s_get_blk_error_layer_CNR()
/linux-4.1.27/net/core/
skbuff.c
748 void kfree_skb_list(struct sk_buff *segs) in kfree_skb_list() argument
750 while (segs) { in kfree_skb_list()
751 struct sk_buff *next = segs->next; in kfree_skb_list()
753 kfree_skb(segs); in kfree_skb_list()
754 segs = next; in kfree_skb_list()
3002 struct sk_buff *segs = NULL; in skb_segment() local
3103 if (segs) in skb_segment()
3106 segs = nskb; in skb_segment()
3206 segs->prev = tail; in skb_segment()
3217 return segs; in skb_segment()
[all …]
dev.c
2436 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); in skb_mac_gso_segment() local
2449 segs = ptype->callbacks.gso_segment(skb, features); in skb_mac_gso_segment()
2457 return segs; in skb_mac_gso_segment()
2711 struct sk_buff *segs; in validate_xmit_skb() local
2713 segs = skb_gso_segment(skb, features); in validate_xmit_skb()
2714 if (IS_ERR(segs)) { in validate_xmit_skb()
2716 } else if (segs) { in validate_xmit_skb()
2718 skb = segs; in validate_xmit_skb()
/linux-4.1.27/drivers/net/ethernet/intel/e1000/
e1000.h
158 unsigned short segs; member
e1000_main.c
2844 unsigned int f, bytecount, segs; in e1000_tx_map() local
2953 segs = skb_shinfo(skb)->gso_segs ?: 1; in e1000_tx_map()
2955 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; in e1000_tx_map()
2958 tx_ring->buffer_info[i].segs = segs; in e1000_tx_map()
3866 total_tx_packets += buffer_info->segs; in e1000_clean_tx_irq()
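
Note: the bytecount above is standard TSO accounting, repeated in e1000e and igbvf below: the NIC replicates the protocol headers into every output segment, so the wire total is skb->len plus (segs - 1) extra copies of the header. A small model of the formula with illustrative values:

    #include <stdio.h>

    /* TSO byte accounting as in e1000_tx_map(): the hardware copies
     * the header into every segment, so the wire byte count is the
     * original skb length plus (segs - 1) extra header copies */
    static unsigned int tso_bytecount(unsigned int segs,
                                      unsigned int headlen,
                                      unsigned int skb_len)
    {
        return (segs - 1) * headlen + skb_len;
    }

    int main(void)
    {
        /* 4 segments, 66-byte headers, 6066-byte super-packet -> 6264 */
        printf("%u\n", tso_bytecount(4, 66, 6066));
        return 0;
    }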
/linux-4.1.27/drivers/net/ethernet/myricom/myri10ge/
myri10ge.c
3087 struct sk_buff *segs, *curr; in myri10ge_sw_tso() local
3092 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); in myri10ge_sw_tso()
3093 if (IS_ERR(segs)) in myri10ge_sw_tso()
3096 while (segs) { in myri10ge_sw_tso()
3097 curr = segs; in myri10ge_sw_tso()
3098 segs = segs->next; in myri10ge_sw_tso()
3103 if (segs != NULL) { in myri10ge_sw_tso()
3104 curr = segs; in myri10ge_sw_tso()
3105 segs = segs->next; in myri10ge_sw_tso()
3107 dev_kfree_skb_any(segs); in myri10ge_sw_tso()
/linux-4.1.27/drivers/staging/octeon/
ethernet-tx.c
281 pko_command.s.segs = 1; in cvm_oct_xmit()
310 pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; in cvm_oct_xmit()
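
Note: the Octeon PKO send command (cvmx-pko.h above) packs the gather-segment count into a 6-bit field, capping a packet at 63 segments; cvm_oct_xmit() sets it to nr_frags + 1 for scatter/gather frames. A minimal model of such a descriptor word; the field layout here is illustrative, and C bitfield ordering is implementation-defined:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative model of a hardware send-command word with a 6-bit
     * segment count, in the style of cvmx-pko.h's command union */
    union pko_command {
        uint64_t u64;
        struct {
            uint64_t segs:6;         /* gather segments, max 63 */
            uint64_t total_bytes:16;
            uint64_t reserved:42;
        } s;
    };

    int main(void)
    {
        union pko_command cmd = { .u64 = 0 };
        cmd.s.segs = 3;              /* e.g. nr_frags + 1 */
        cmd.s.total_bytes = 1514;
        printf("%#llx\n", (unsigned long long)cmd.u64);
        return 0;
    }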
/linux-4.1.27/drivers/scsi/
vmw_pvscsi.c
368 int segs = scsi_dma_map(cmd); in pvscsi_map_buffers() local
369 if (segs > 1) { in pvscsi_map_buffers()
370 pvscsi_create_sg(ctx, sg, segs); in pvscsi_map_buffers()
osst.c
5333 int segs, nbr, max_segs, b_size, order, got; in enlarge_buffer() local
5369 for (segs=STbuffer->sg_segs=1, got=b_size; in enlarge_buffer()
5370 segs < max_segs && got < OS_FRAME_SIZE; ) { in enlarge_buffer()
5372 STbuffer->sg[segs].offset = 0; in enlarge_buffer()
5382 …sg_set_page(&STbuffer->sg[segs], page, (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - g… in enlarge_buffer()
5383 got += STbuffer->sg[segs].length; in enlarge_buffer()
5385 STbuffer->sg_segs = ++segs; in enlarge_buffer()
5395 STbuffer->sg[segs-1].length, page_address(STbuffer->sg[segs-1].page)); in enlarge_buffer()
st.c
3749 int segs, max_segs, b_size, order, got; in enlarge_buffer() local
3785 for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size; in enlarge_buffer()
3786 segs < max_segs && got < new_size;) { in enlarge_buffer()
3799 STbuffer->reserved_pages[segs] = page; in enlarge_buffer()
3800 segs++; in enlarge_buffer()
/linux-4.1.27/drivers/net/usb/
r8152.c
1374 struct sk_buff *segs, *nskb; in r8152_csum_workaround() local
1377 segs = skb_gso_segment(skb, features); in r8152_csum_workaround()
1378 if (IS_ERR(segs) || !segs) in r8152_csum_workaround()
1384 nskb = segs; in r8152_csum_workaround()
1385 segs = segs->next; in r8152_csum_workaround()
1388 } while (segs); in r8152_csum_workaround()
/linux-4.1.27/drivers/net/ethernet/intel/e1000e/
e1000.h
156 unsigned int segs; member
netdev.c
1242 total_tx_packets += buffer_info->segs; in e1000_clean_tx_irq()
5320 unsigned int f, bytecount, segs; in e1000_tx_map() local
5380 segs = skb_shinfo(skb)->gso_segs ? : 1; in e1000_tx_map()
5382 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; in e1000_tx_map()
5385 tx_ring->buffer_info[i].segs = segs; in e1000_tx_map()
/linux-4.1.27/drivers/net/ethernet/intel/igbvf/
netdev.c
826 unsigned int segs, bytecount; in igbvf_clean_tx_irq() local
829 segs = skb_shinfo(skb)->gso_segs ?: 1; in igbvf_clean_tx_irq()
831 bytecount = ((segs - 1) * skb_headlen(skb)) + in igbvf_clean_tx_irq()
833 total_packets += segs; in igbvf_clean_tx_irq()
/linux-4.1.27/arch/mips/cavium-octeon/executive/
cvmx-helper.c
878 pko_command.s.segs = num_segs; in __cvmx_helper_errata_fix_ipd_ptr_alignment()
/linux-4.1.27/drivers/net/ethernet/realtek/
r8169.c
6873 struct sk_buff *segs, *nskb; in r8169_csum_workaround() local
6876 segs = skb_gso_segment(skb, features); in r8169_csum_workaround()
6877 if (IS_ERR(segs) || !segs) in r8169_csum_workaround()
6881 nskb = segs; in r8169_csum_workaround()
6882 segs = segs->next; in r8169_csum_workaround()
6885 } while (segs); in r8169_csum_workaround()
/linux-4.1.27/drivers/scsi/aic7xxx/
aic7xxx_core.c
4713 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) in ahc_dmamap_cb() argument
4718 *baddr = segs->ds_addr; in ahc_dmamap_cb()
4971 struct ahc_dma_seg *segs; in ahc_alloc_scbs() local
5001 segs = sg_map->sg_vaddr; in ahc_alloc_scbs()
5016 next_scb->sg_list = segs; in ahc_alloc_scbs()
5034 segs += AHC_NSEG; in ahc_alloc_scbs()
aic79xx_core.c
6382 ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) in ahd_dmamap_cb() argument
6387 *baddr = segs->ds_addr; in ahd_dmamap_cb()
6814 uint8_t *segs; in ahd_alloc_scbs() local
6865 segs = sg_map->vaddr + offset; in ahd_alloc_scbs()
6887 segs = sg_map->vaddr; in ahd_alloc_scbs()
6956 next_scb->sg_list = segs; in ahd_alloc_scbs()
6992 segs += ahd_sglist_size(ahd); in ahd_alloc_scbs()
aic7xxx.seq
735 /* Did we just finish fetching segs? */
aic79xx.seq
1475 /* Did we just finish fetching segs? */
/linux-4.1.27/fs/cifs/
connect.c
549 unsigned int segs; in cifs_readv_from_socket() local
568 segs = kvec_array_init(iov, iov_orig, nr_segs, total_read); in cifs_readv_from_socket()
571 iov, segs, to_read, 0); in cifs_readv_from_socket()
/linux-4.1.27/drivers/scsi/qla2xxx/
qla_os.c
5321 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
5322 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
5323 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
5324 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
qla_def.h
2580 uint32_t segs[4]; member
qla_init.c
5553 seg = blob->segs; in qla2x00_load_risc()
/linux-4.1.27/drivers/net/ethernet/broadcom/
tg3.c
7852 struct sk_buff *segs, *nskb; in tg3_tso_bug() local
7871 segs = skb_gso_segment(skb, tp->dev->features & in tg3_tso_bug()
7873 if (IS_ERR(segs) || !segs) in tg3_tso_bug()
7877 nskb = segs; in tg3_tso_bug()
7878 segs = segs->next; in tg3_tso_bug()
7881 } while (segs); in tg3_tso_bug()