
Searched refs:segs (Results 1 – 79 of 79) sorted by relevance

/linux-4.4.14/net/xfrm/
xfrm_output.c:168 struct sk_buff *segs; in xfrm_output_gso() local
172 segs = skb_gso_segment(skb, 0); in xfrm_output_gso()
174 if (IS_ERR(segs)) in xfrm_output_gso()
175 return PTR_ERR(segs); in xfrm_output_gso()
176 if (segs == NULL) in xfrm_output_gso()
180 struct sk_buff *nskb = segs->next; in xfrm_output_gso()
183 segs->next = NULL; in xfrm_output_gso()
184 err = xfrm_output2(net, sk, segs); in xfrm_output_gso()
191 segs = nskb; in xfrm_output_gso()
192 } while (segs); in xfrm_output_gso()
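
The xfrm_output.c hits above are one instance of a segment-and-walk idiom that recurs throughout these results (udp_offload.c, sch_tbf.c, nfnetlink_queue.c, macvtap.c, and the NIC workaround paths in r8152.c, r8169.c and tg3.c all follow it). A minimal sketch of that idiom, assuming a hypothetical per-segment handler output_one() in place of the real xfrm_output2():

    /* Hedged sketch, not kernel code: split a GSO skb into MTU-sized
     * segments and hand each one off individually. output_one() is a
     * stand-in for the real per-packet output path. */
    static int output_gso_sketch(struct sk_buff *skb)
    {
            struct sk_buff *segs;
            int err;

            segs = skb_gso_segment(skb, 0);
            if (IS_ERR(segs))
                    return PTR_ERR(segs);
            if (segs == NULL)               /* nothing to split */
                    return output_one(skb);

            kfree_skb(skb);                 /* original replaced by the list */
            do {
                    struct sk_buff *nskb = segs->next;

                    segs->next = NULL;      /* detach before handing off */
                    err = output_one(segs); /* consumes the segment */
                    if (err) {
                            kfree_skb_list(nskb);   /* drop what's left */
                            return err;
                    }
                    segs = nskb;
            } while (segs);
            return 0;
    }
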
/linux-4.4.14/net/ipv4/
udp_offload.c:34 struct sk_buff *segs = ERR_PTR(-EINVAL); in __skb_udp_tunnel_segment() local
68 segs = gso_inner_segment(skb, enc_features); in __skb_udp_tunnel_segment()
69 if (IS_ERR_OR_NULL(segs)) { in __skb_udp_tunnel_segment()
77 skb = segs; in __skb_udp_tunnel_segment()
137 return segs; in __skb_udp_tunnel_segment()
147 struct sk_buff *segs = ERR_PTR(-EINVAL); in skb_udp_tunnel_segment() local
169 segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment, in skb_udp_tunnel_segment()
175 return segs; in skb_udp_tunnel_segment()
181 struct sk_buff *segs = ERR_PTR(-EINVAL); in udp4_ufo_fragment() local
190 segs = skb_udp_tunnel_segment(skb, features, false); in udp4_ufo_fragment()
[all …]
gre_offload.c:21 struct sk_buff *segs = ERR_PTR(-EINVAL); in gre_gso_segment() local
73 segs = skb_mac_gso_segment(skb, enc_features); in gre_gso_segment()
74 if (IS_ERR_OR_NULL(segs)) { in gre_gso_segment()
79 skb = segs; in gre_gso_segment()
91 kfree_skb_list(segs); in gre_gso_segment()
92 segs = ERR_PTR(err); in gre_gso_segment()
116 return segs; in gre_gso_segment()
tcp_offload.c:57 struct sk_buff *segs = ERR_PTR(-EINVAL); in tcp_gso_segment() local
106 segs = NULL; in tcp_gso_segment()
115 segs = skb_segment(skb, features); in tcp_gso_segment()
116 if (IS_ERR(segs)) in tcp_gso_segment()
120 segs->ooo_okay = ooo_okay; in tcp_gso_segment()
124 skb = segs; in tcp_gso_segment()
129 tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); in tcp_gso_segment()
175 return segs; in tcp_gso_segment()
ip_output.c:227 struct sk_buff *segs; in ip_finish_output_gso() local
244 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); in ip_finish_output_gso()
245 if (IS_ERR_OR_NULL(segs)) { in ip_finish_output_gso()
253 struct sk_buff *nskb = segs->next; in ip_finish_output_gso()
256 segs->next = NULL; in ip_finish_output_gso()
257 err = ip_fragment(net, sk, segs, mtu, ip_finish_output2); in ip_finish_output_gso()
261 segs = nskb; in ip_finish_output_gso()
262 } while (segs); in ip_finish_output_gso()
af_inet.c:1200 struct sk_buff *segs = ERR_PTR(-EINVAL); in inet_gso_segment() local
1251 segs = ERR_PTR(-EPROTONOSUPPORT); in inet_gso_segment()
1261 segs = ops->callbacks.gso_segment(skb, features); in inet_gso_segment()
1263 if (IS_ERR_OR_NULL(segs)) in inet_gso_segment()
1266 skb = segs; in inet_gso_segment()
1286 return segs; in inet_gso_segment()
route.c:473 u32 ip_idents_reserve(u32 hash, int segs) in ip_idents_reserve() argument
484 return atomic_add_return(segs + delta, p_id) - segs; in ip_idents_reserve()
488 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs) in __ip_select_ident() argument
499 id = ip_idents_reserve(hash, segs); in __ip_select_ident()
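
The route.c hits reserve a block of consecutive IP IDs in one shot, one per GSO segment, so a segmented super-packet never interleaves its IDs with another flow's. A minimal sketch of that reservation, assuming a single global bucket and omitting the time-based delta the real ip_idents_reserve() mixes in:

    static atomic_t id_bucket = ATOMIC_INIT(0);

    /* Reserve 'segs' consecutive IDs and return the first one.
     * atomic_add_return() yields the post-add value, so subtracting
     * 'segs' recovers the start of the freshly claimed block. */
    static u32 idents_reserve_sketch(int segs)
    {
            return (u32)atomic_add_return(segs, &id_bucket) - segs;
    }
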
tcp_output.c:1535 u32 bytes, segs; in tcp_tso_autosize() local
1545 segs = max_t(u32, bytes / mss_now, sysctl_tcp_min_tso_segs); in tcp_tso_autosize()
1547 return min_t(u32, segs, sk->sk_gso_max_segs); in tcp_tso_autosize()
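
tcp_tso_autosize() sizes TSO bursts from the pacing rate rather than always building the largest possible super-packet. A simplified sketch of the math in the hits above (the real function also caps bytes by sk_gso_max_size); sk_pacing_rate is in bytes per second, so a right shift by 10 approximates one millisecond of data:

    u32 tso_autosize_sketch(const struct sock *sk, unsigned int mss_now)
    {
            u32 bytes = sk->sk_pacing_rate >> 10;   /* ~1 ms at current rate */
            u32 segs  = max_t(u32, bytes / mss_now, sysctl_tcp_min_tso_segs);

            return min_t(u32, segs, sk->sk_gso_max_segs);
    }
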
/linux-4.4.14/drivers/staging/rdma/ipath/
ipath_keys.c:167 while (off >= mr->map[m]->segs[n].length) { in ipath_lkey_ok()
168 off -= mr->map[m]->segs[n].length; in ipath_lkey_ok()
176 isge->vaddr = mr->map[m]->segs[n].vaddr + off; in ipath_lkey_ok()
177 isge->length = mr->map[m]->segs[n].length - off; in ipath_lkey_ok()
249 while (off >= mr->map[m]->segs[n].length) { in ipath_rkey_ok()
250 off -= mr->map[m]->segs[n].length; in ipath_rkey_ok()
258 sge->vaddr = mr->map[m]->segs[n].vaddr + off; in ipath_rkey_ok()
259 sge->length = mr->map[m]->segs[n].length - off; in ipath_rkey_ok()
ipath_mr.c:158 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr; in ipath_reg_phys_mr()
159 mr->mr.map[m]->segs[n].length = buffer_list[i].size; in ipath_reg_phys_mr()
232 mr->mr.map[m]->segs[n].vaddr = vaddr; in ipath_reg_user_mr()
233 mr->mr.map[m]->segs[n].length = umem->page_size; in ipath_reg_user_mr()
371 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in ipath_map_phys_fmr()
372 fmr->mr.map[m]->segs[n].length = ps; in ipath_map_phys_fmr()
ipath_verbs.c:197 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_copy_sge()
199 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_copy_sge()
236 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_skip_sge()
238 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_skip_sge()
283 sge.mr->map[sge.m]->segs[sge.n].vaddr; in ipath_count_sge()
285 sge.mr->map[sge.m]->segs[sge.n].length; in ipath_count_sge()
322 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_copy_from_sge()
324 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_copy_from_sge()
809 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; in update_sge()
810 sge->length = sge->mr->map[sge->m]->segs[sge->n].length; in update_sge()
ipath_ud.c:206 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_ud_loopback()
208 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_ud_loopback()
ipath_ruc.c:421 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_ruc_loopback()
423 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_ruc_loopback()
ipath_sdma.c:772 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ipath_sdma_verbs_send()
774 sge->mr->map[sge->m]->segs[sge->n].length; in ipath_sdma_verbs_send()
ipath_verbs.h:238 struct ipath_seg segs[IPATH_SEGSZ]; member
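
Every ipath hit above (and the qib and hfi1 hits that follow) walks the same two-level layout: a memory region is an array of map chunks, each holding a fixed-size segs[] array, and a byte offset is resolved by skipping whole segments. A self-contained sketch with simplified stand-in types; SEGSZ plays the role of the per-driver IPATH_SEGSZ/QIB_SEGSZ/HFI1_SEGSZ constant:

    #define SEGSZ 64                        /* hypothetical chunk size */

    struct seg     { void *vaddr; size_t length; };
    struct seg_map { struct seg segs[SEGSZ]; };
    struct region  { struct seg_map **map; };

    /* Turn a byte offset into a pointer: skip whole segments, wrapping
     * from segs[SEGSZ - 1] of one chunk to segs[0] of the next, then
     * add the residual offset into the segment that contains it. */
    static void *region_resolve(struct region *mr, size_t off)
    {
            size_t m = 0, n = 0;

            while (off >= mr->map[m]->segs[n].length) {
                    off -= mr->map[m]->segs[n].length;
                    if (++n >= SEGSZ) {
                            m++;
                            n = 0;
                    }
            }
            return (char *)mr->map[m]->segs[n].vaddr + off;
    }
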
/linux-4.4.14/drivers/infiniband/hw/qib/
qib_keys.c:217 while (off >= mr->map[m]->segs[n].length) { in qib_lkey_ok()
218 off -= mr->map[m]->segs[n].length; in qib_lkey_ok()
227 isge->vaddr = mr->map[m]->segs[n].vaddr + off; in qib_lkey_ok()
228 isge->length = mr->map[m]->segs[n].length - off; in qib_lkey_ok()
316 while (off >= mr->map[m]->segs[n].length) { in qib_rkey_ok()
317 off -= mr->map[m]->segs[n].length; in qib_rkey_ok()
326 sge->vaddr = mr->map[m]->segs[n].vaddr + off; in qib_rkey_ok()
327 sge->length = mr->map[m]->segs[n].length - off; in qib_rkey_ok()
380 mrg->map[m]->segs[n].vaddr = (void *) page_list[i]; in qib_reg_mr()
381 mrg->map[m]->segs[n].length = ps; in qib_reg_mr()
qib_mr.c:203 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr; in qib_reg_phys_mr()
204 mr->mr.map[m]->segs[n].length = buffer_list[i].size; in qib_reg_phys_mr()
277 mr->mr.map[m]->segs[n].vaddr = vaddr; in qib_reg_user_mr()
278 mr->mr.map[m]->segs[n].length = umem->page_size; in qib_reg_user_mr()
470 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in qib_map_phys_fmr()
471 fmr->mr.map[m]->segs[n].length = ps; in qib_map_phys_fmr()
qib_verbs.c:197 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_copy_sge()
199 sge->mr->map[sge->m]->segs[sge->n].length; in qib_copy_sge()
238 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_skip_sge()
240 sge->mr->map[sge->m]->segs[sge->n].length; in qib_skip_sge()
285 sge.mr->map[sge.m]->segs[sge.n].vaddr; in qib_count_sge()
287 sge.mr->map[sge.m]->segs[sge.n].length; in qib_count_sge()
323 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_copy_from_sge()
325 sge->mr->map[sge->m]->segs[sge->n].length; in qib_copy_from_sge()
767 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; in update_sge()
768 sge->length = sge->mr->map[sge->m]->segs[sge->n].length; in update_sge()
qib_ud.c:198 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_ud_loopback()
200 sge->mr->map[sge->m]->segs[sge->n].length; in qib_ud_loopback()
qib_sdma.c:630 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_sdma_verbs_send()
632 sge->mr->map[sge->m]->segs[sge->n].length; in qib_sdma_verbs_send()
qib_ruc.c:539 sge->mr->map[sge->m]->segs[sge->n].vaddr; in qib_ruc_loopback()
541 sge->mr->map[sge->m]->segs[sge->n].length; in qib_ruc_loopback()
qib_verbs.h:294 struct qib_seg segs[QIB_SEGSZ]; member
/linux-4.4.14/drivers/staging/rdma/hfi1/
keys.c:239 while (off >= mr->map[m]->segs[n].length) { in hfi1_lkey_ok()
240 off -= mr->map[m]->segs[n].length; in hfi1_lkey_ok()
249 isge->vaddr = mr->map[m]->segs[n].vaddr + off; in hfi1_lkey_ok()
250 isge->length = mr->map[m]->segs[n].length - off; in hfi1_lkey_ok()
336 while (off >= mr->map[m]->segs[n].length) { in hfi1_rkey_ok()
337 off -= mr->map[m]->segs[n].length; in hfi1_rkey_ok()
346 sge->vaddr = mr->map[m]->segs[n].vaddr + off; in hfi1_rkey_ok()
347 sge->length = mr->map[m]->segs[n].length - off; in hfi1_rkey_ok()
mr.c:220 mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr; in hfi1_reg_phys_mr()
221 mr->mr.map[m]->segs[n].length = buffer_list[i].size; in hfi1_reg_phys_mr()
294 mr->mr.map[m]->segs[n].vaddr = vaddr; in hfi1_reg_user_mr()
295 mr->mr.map[m]->segs[n].length = umem->page_size; in hfi1_reg_user_mr()
460 fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; in hfi1_map_phys_fmr()
461 fmr->mr.map[m]->segs[n].length = ps; in hfi1_map_phys_fmr()
verbs.c:307 sge->mr->map[sge->m]->segs[sge->n].vaddr; in hfi1_copy_sge()
309 sge->mr->map[sge->m]->segs[sge->n].length; in hfi1_copy_sge()
348 sge->mr->map[sge->m]->segs[sge->n].vaddr; in hfi1_skip_sge()
350 sge->mr->map[sge->m]->segs[sge->n].length; in hfi1_skip_sge()
731 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; in update_sge()
732 sge->length = sge->mr->map[sge->m]->segs[sge->n].length; in update_sge()
ud.c:220 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ud_loopback()
222 sge->mr->map[sge->m]->segs[sge->n].length; in ud_loopback()
ruc.c:561 sge->mr->map[sge->m]->segs[sge->n].vaddr; in ruc_loopback()
563 sge->mr->map[sge->m]->segs[sge->n].length; in ruc_loopback()
verbs.h:305 struct hfi1_seg segs[HFI1_SEGSZ]; member
/linux-4.4.14/net/ipv6/
udp_offload.c:23 struct sk_buff *segs = ERR_PTR(-EINVAL); in udp6_ufo_fragment() local
59 segs = NULL; in udp6_ufo_fragment()
65 segs = skb_udp_tunnel_segment(skb, features, true); in udp6_ufo_fragment()
122 segs = skb_segment(skb, features); in udp6_ufo_fragment()
126 return segs; in udp6_ufo_fragment()
ip6_offload.c:60 struct sk_buff *segs = ERR_PTR(-EINVAL); in ipv6_gso_segment() local
99 segs = ERR_PTR(-EPROTONOSUPPORT); in ipv6_gso_segment()
112 segs = ops->callbacks.gso_segment(skb, features); in ipv6_gso_segment()
115 if (IS_ERR(segs)) in ipv6_gso_segment()
118 for (skb = segs; skb; skb = skb->next) { in ipv6_gso_segment()
137 return segs; in ipv6_gso_segment()
/linux-4.4.14/net/sched/
sch_tbf.c:161 struct sk_buff *segs, *nskb; in tbf_segment() local
166 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); in tbf_segment()
168 if (IS_ERR_OR_NULL(segs)) in tbf_segment()
172 while (segs) { in tbf_segment()
173 nskb = segs->next; in tbf_segment()
174 segs->next = NULL; in tbf_segment()
175 qdisc_skb_cb(segs)->pkt_len = segs->len; in tbf_segment()
176 len += segs->len; in tbf_segment()
177 ret = qdisc_enqueue(segs, q->qdisc); in tbf_segment()
184 segs = nskb; in tbf_segment()
sch_netem.c:404 struct sk_buff *segs; in netem_segment() local
407 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); in netem_segment()
409 if (IS_ERR_OR_NULL(segs)) { in netem_segment()
414 return segs; in netem_segment()
429 struct sk_buff *segs = NULL; in netem_enqueue() local
480 segs = netem_segment(skb, sch); in netem_enqueue()
481 if (!segs) in netem_enqueue()
484 segs = skb; in netem_enqueue()
487 skb = segs; in netem_enqueue()
488 segs = segs->next; in netem_enqueue()
[all …]
/linux-4.4.14/net/mpls/
mpls_gso.c:25 struct sk_buff *segs = ERR_PTR(-EINVAL); in mpls_gso_segment() local
48 segs = skb_mac_gso_segment(skb, mpls_features); in mpls_gso_segment()
60 return segs; in mpls_gso_segment()
/linux-4.4.14/fs/
binfmt_elf_fdpic.c:792 seg = loadmap->segs; in elf_fdpic_map_file()
813 seg = loadmap->segs; in elf_fdpic_map_file()
838 seg = loadmap->segs; in elf_fdpic_map_file()
859 seg = loadmap->segs; in elf_fdpic_map_file()
891 mseg = loadmap->segs; in elf_fdpic_map_file()
919 seg = loadmap->segs; in elf_fdpic_map_file()
950 seg = params->loadmap->segs; in elf_fdpic_map_file_constdisp_on_uclinux()
1041 seg = params->loadmap->segs; in elf_fdpic_map_file_by_direct_mmap()
1314 static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs) in fill_elf_fdpic_header() argument
1332 elf->e_phnum = segs; in fill_elf_fdpic_header()
[all …]
binfmt_elf.c:1353 static void fill_elf_header(struct elfhdr *elf, int segs, in fill_elf_header() argument
1371 elf->e_phnum = segs; in fill_elf_header()
2102 elf_addr_t e_shoff, int segs) in fill_extnum_info() argument
2114 shdr4extnum->sh_info = segs; in fill_extnum_info()
2128 int segs, i; in elf_core_dump() local
2160 segs = current->mm->map_count; in elf_core_dump()
2161 segs += elf_core_extra_phdrs(); in elf_core_dump()
2165 segs++; in elf_core_dump()
2168 segs++; in elf_core_dump()
2173 e_phnum = segs > PN_XNUM ? PN_XNUM : segs; in elf_core_dump()
[all …]
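
A note on the binfmt_elf.c hits: segs counts one program header per VMA plus the note segment and any arch extras, and ELF's e_phnum field is only 16 bits. When segs reaches PN_XNUM (0xffff), e_phnum is clamped to PN_XNUM and the true count is stashed in sh_info of the first section header, which is what the fill_extnum_info() hit writes.
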
/linux-4.4.14/drivers/usb/wusbcore/
wa-xfer.c:157 u8 segs, segs_submitted, segs_done; member
191 for (cnt = 0; cnt < xfer->segs; cnt++) { in wa_xfer_destroy()
349 for (cnt = 0; cnt < xfer->segs; cnt++) { in __wa_xfer_is_done()
363 && cnt != xfer->segs-1) in __wa_xfer_is_done()
469 while (seg_index < xfer->segs) { in __wa_xfer_abort_cb()
640 xfer->segs = 0; in __wa_xfer_setup_sizes()
649 ++xfer->segs; in __wa_xfer_setup_sizes()
652 xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, in __wa_xfer_setup_sizes()
654 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) in __wa_xfer_setup_sizes()
655 xfer->segs = 1; in __wa_xfer_setup_sizes()
[all …]
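
A note on the wa-xfer.c sizing hit: DIV_ROUND_UP(n, d) expands to (n + d - 1) / d, so a 5000-byte URB with a 2048-byte wire segment size yields (5000 + 2047) / 2048 = 3 segments; an empty control transfer is then bumped to one segment so the setup stage still goes out.
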
/linux-4.4.14/include/uapi/linux/
elf-fdpic.h:29 struct elf32_fdpic_loadseg segs[]; member
/linux-4.4.14/include/net/
ip.h:333 u32 ip_idents_reserve(u32 hash, int segs);
334 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
337 struct sock *sk, int segs) in ip_select_ident_segs() argument
349 inet_sk(sk)->inet_id += segs; in ip_select_ident_segs()
354 __ip_select_ident(net, iph, segs); in ip_select_ident_segs()
tcp.h:798 static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs) in tcp_skb_pcount_set() argument
800 TCP_SKB_CB(skb)->tcp_gso_segs = segs; in tcp_skb_pcount_set()
803 static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs) in tcp_skb_pcount_add() argument
805 TCP_SKB_CB(skb)->tcp_gso_segs += segs; in tcp_skb_pcount_add()
/linux-4.4.14/net/netfilter/
nfnetlink_queue.c:677 struct sk_buff *skb, *segs; in nfqnl_enqueue_packet() local
705 segs = skb_gso_segment(skb, 0); in nfqnl_enqueue_packet()
710 if (IS_ERR_OR_NULL(segs)) in nfqnl_enqueue_packet()
715 struct sk_buff *nskb = segs->next; in nfqnl_enqueue_packet()
718 segs, entry); in nfqnl_enqueue_packet()
722 kfree_skb(segs); in nfqnl_enqueue_packet()
723 segs = nskb; in nfqnl_enqueue_packet()
724 } while (segs); in nfqnl_enqueue_packet()
/linux-4.4.14/drivers/net/
macvtap.c:366 struct sk_buff *segs = __skb_gso_segment(skb, features, false); in macvtap_handle_frame() local
368 if (IS_ERR(segs)) in macvtap_handle_frame()
371 if (!segs) { in macvtap_handle_frame()
377 while (segs) { in macvtap_handle_frame()
378 struct sk_buff *nskb = segs->next; in macvtap_handle_frame()
380 segs->next = NULL; in macvtap_handle_frame()
381 skb_queue_tail(&q->sk.sk_receive_queue, segs); in macvtap_handle_frame()
382 segs = nskb; in macvtap_handle_frame()
/linux-4.4.14/drivers/block/
xen-blkfront.c:1080 int i, j, segs; in blkif_free() local
1129 segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ? in blkif_free()
1132 for (j = 0; j < segs; j++) { in blkif_free()
1147 for (j = 0; j < INDIRECT_GREFS(segs); j++) { in blkif_free()
1658 unsigned int segs, offset; in blkif_recover() local
1682 segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; in blkif_recover()
1683 blk_queue_max_segments(info->rq, segs); in blkif_recover()
1725 BUG_ON(req->nr_phys_segments > segs); in blkif_recover()
1733 if (bio_segments(bio) > segs) { in blkif_recover()
1738 pending = (bio_segments(bio) + segs - 1) / segs; in blkif_recover()
[all …]
/linux-4.4.14/arch/powerpc/platforms/powernv/
pci-ioda.c:1850 unsigned int segs) in pnv_pci_ioda_setup_dma_pe() argument
1875 (base << 28), ((base + segs) << 28) - 1); in pnv_pci_ioda_setup_dma_pe()
1883 get_order(TCE32_TABLE_SIZE * segs)); in pnv_pci_ioda_setup_dma_pe()
1889 memset(addr, 0, TCE32_TABLE_SIZE * segs); in pnv_pci_ioda_setup_dma_pe()
1892 for (i = 0; i < segs; i++) { in pnv_pci_ioda_setup_dma_pe()
1906 pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs, in pnv_pci_ioda_setup_dma_pe()
1937 __free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs)); in pnv_pci_ioda_setup_dma_pe()
2388 unsigned int residual, remaining, segs, tw, base; in pnv_ioda_setup_dma() local
2423 segs = 1; in pnv_ioda_setup_dma()
2425 segs += ((pe->dma_weight * residual) + (tw / 2)) / tw; in pnv_ioda_setup_dma()
[all …]
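
A note on the pci-ioda.c arithmetic: adding tw / 2 before the division rounds the weighted share to the nearest integer instead of truncating, e.g. with pe->dma_weight * residual = 7 and tw = 4, (7 + 2) / 4 = 2 where plain 7 / 4 would give 1.
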
/linux-4.4.14/arch/mips/include/asm/octeon/
cvmx-pko.h:242 uint64_t segs:6; member
247 uint64_t segs:6;
cvmx-pko-defs.h:157 uint64_t segs:6; member
161 uint64_t segs:6;
236 uint64_t segs:6; member
240 uint64_t segs:6;
370 uint64_t segs:6; member
374 uint64_t segs:6;
/linux-4.4.14/include/linux/
bio.h:247 unsigned segs = 0; in bio_segments() local
263 segs++; in bio_segments()
265 return segs; in bio_segments()
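
The bio.h hit counts one segment per bio_vec the iterator yields. A sketch of the loop around the fragments shown, assuming the discard/write-same special cases of the real bio_segments() are out of scope:

    unsigned bio_segments_sketch(struct bio *bio)
    {
            struct bio_vec bv;
            struct bvec_iter iter;
            unsigned segs = 0;

            /* One pass over the remaining bvecs from bi_iter onward. */
            bio_for_each_segment(bv, bio, iter)
                    segs++;
            return segs;
    }
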
blkdev.h:1525 unsigned int segs) in blk_queue_max_integrity_segments() argument
1527 q->limits.max_integrity_segments = segs; in blk_queue_max_integrity_segments()
1598 unsigned int segs) in blk_queue_max_integrity_segments() argument
skbuff.h:798 void kfree_skb_list(struct sk_buff *segs);
/linux-4.4.14/net/openvswitch/
datapath.c:338 struct sk_buff *segs, *nskb; in queue_gso_packets() local
342 segs = __skb_gso_segment(skb, NETIF_F_SG, false); in queue_gso_packets()
343 if (IS_ERR(segs)) in queue_gso_packets()
344 return PTR_ERR(segs); in queue_gso_packets()
345 if (segs == NULL) in queue_gso_packets()
358 skb = segs; in queue_gso_packets()
360 if (gso_type & SKB_GSO_UDP && skb != segs) in queue_gso_packets()
370 skb = segs; in queue_gso_packets()
/linux-4.4.14/arch/x86/kvm/
vmx.c:565 struct kvm_segment segs[8]; member
3442 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_pmode()
3443 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_pmode()
3444 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_pmode()
3445 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_pmode()
3446 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_pmode()
3447 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_pmode()
3453 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_pmode()
3465 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_pmode()
3466 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_pmode()
[all …]
/linux-4.4.14/drivers/net/ethernet/sun/
sunvnet.c:1151 struct sk_buff *segs; in vnet_handle_offloads() local
1202 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); in vnet_handle_offloads()
1203 if (IS_ERR(segs)) in vnet_handle_offloads()
1210 while (segs) { in vnet_handle_offloads()
1211 struct sk_buff *curr = segs; in vnet_handle_offloads()
1213 segs = segs->next; in vnet_handle_offloads()
/linux-4.4.14/drivers/media/dvb-frontends/
mb86a20s.c:1447 const struct linear_segments *segs; in mb86a20s_get_blk_error_layer_CNR() local
1499 segs = cnr_qpsk_table; in mb86a20s_get_blk_error_layer_CNR()
1503 segs = cnr_16qam_table; in mb86a20s_get_blk_error_layer_CNR()
1508 segs = cnr_64qam_table; in mb86a20s_get_blk_error_layer_CNR()
1512 cnr = interpolate_value(mer, segs, segs_len); in mb86a20s_get_blk_error_layer_CNR()
/linux-4.4.14/net/core/
skbuff.c:700 void kfree_skb_list(struct sk_buff *segs) in kfree_skb_list() argument
702 while (segs) { in kfree_skb_list()
703 struct sk_buff *next = segs->next; in kfree_skb_list()
705 kfree_skb(segs); in kfree_skb_list()
706 segs = next; in kfree_skb_list()
3002 struct sk_buff *segs = NULL; in skb_segment() local
3103 if (segs) in skb_segment()
3106 segs = nskb; in skb_segment()
3206 segs->prev = tail; in skb_segment()
3217 return segs; in skb_segment()
[all …]
dev.c:2499 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); in skb_mac_gso_segment() local
2512 segs = ptype->callbacks.gso_segment(skb, features); in skb_mac_gso_segment()
2520 return segs; in skb_mac_gso_segment()
2774 struct sk_buff *segs; in validate_xmit_skb() local
2776 segs = skb_gso_segment(skb, features); in validate_xmit_skb()
2777 if (IS_ERR(segs)) { in validate_xmit_skb()
2779 } else if (segs) { in validate_xmit_skb()
2781 skb = segs; in validate_xmit_skb()
/linux-4.4.14/drivers/net/ethernet/intel/e1000/
e1000.h:158 unsigned short segs; member
e1000_main.c:2839 unsigned int f, bytecount, segs; in e1000_tx_map() local
2948 segs = skb_shinfo(skb)->gso_segs ?: 1; in e1000_tx_map()
2950 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; in e1000_tx_map()
2953 tx_ring->buffer_info[i].segs = segs; in e1000_tx_map()
3861 total_tx_packets += buffer_info->segs; in e1000_clean_tx_irq()
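
A hedged reading of the bytecount math shared by the e1000, e1000e and igbvf hits: a TSO skb carries its headers once (in skb->len), but each of the segs wire packets repeats them, so the on-wire estimate adds segs - 1 extra copies of the linear header. For example, segs = 4, skb_headlen() = 66 and skb->len = 4410 give 3 * 66 + 4410 = 4608 bytes.
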
/linux-4.4.14/drivers/net/ethernet/myricom/myri10ge/
myri10ge.c:3087 struct sk_buff *segs, *curr; in myri10ge_sw_tso() local
3092 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); in myri10ge_sw_tso()
3093 if (IS_ERR(segs)) in myri10ge_sw_tso()
3096 while (segs) { in myri10ge_sw_tso()
3097 curr = segs; in myri10ge_sw_tso()
3098 segs = segs->next; in myri10ge_sw_tso()
3103 if (segs != NULL) { in myri10ge_sw_tso()
3104 curr = segs; in myri10ge_sw_tso()
3105 segs = segs->next; in myri10ge_sw_tso()
3107 dev_kfree_skb_any(segs); in myri10ge_sw_tso()
/linux-4.4.14/block/
blk-merge.c:86 unsigned *segs) in blk_bio_segment_split() argument
152 *segs = nsegs; in blk_bio_segment_split()
/linux-4.4.14/drivers/staging/octeon/
ethernet-tx.c:265 pko_command.s.segs = 1; in cvm_oct_xmit()
294 pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; in cvm_oct_xmit()
/linux-4.4.14/drivers/scsi/
vmw_pvscsi.c:368 int segs = scsi_dma_map(cmd); in pvscsi_map_buffers() local
369 if (segs > 1) { in pvscsi_map_buffers()
370 pvscsi_create_sg(ctx, sg, segs); in pvscsi_map_buffers()
osst.c:5333 int segs, nbr, max_segs, b_size, order, got; in enlarge_buffer() local
5369 for (segs=STbuffer->sg_segs=1, got=b_size; in enlarge_buffer()
5370 segs < max_segs && got < OS_FRAME_SIZE; ) { in enlarge_buffer()
5372 STbuffer->sg[segs].offset = 0; in enlarge_buffer()
5382 …sg_set_page(&STbuffer->sg[segs], page, (OS_FRAME_SIZE - got <= PAGE_SIZE / 2) ? (OS_FRAME_SIZE - g… in enlarge_buffer()
5383 got += STbuffer->sg[segs].length; in enlarge_buffer()
5385 STbuffer->sg_segs = ++segs; in enlarge_buffer()
5395 STbuffer->sg[segs-1].length, page_address(STbuffer->sg[segs-1].page)); in enlarge_buffer()
st.c:3803 int segs, max_segs, b_size, order, got; in enlarge_buffer() local
3839 for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size; in enlarge_buffer()
3840 segs < max_segs && got < new_size;) { in enlarge_buffer()
3853 STbuffer->reserved_pages[segs] = page; in enlarge_buffer()
3854 segs++; in enlarge_buffer()
/linux-4.4.14/drivers/net/usb/
r8152.c:1388 struct sk_buff *segs, *nskb; in r8152_csum_workaround() local
1391 segs = skb_gso_segment(skb, features); in r8152_csum_workaround()
1392 if (IS_ERR(segs) || !segs) in r8152_csum_workaround()
1398 nskb = segs; in r8152_csum_workaround()
1399 segs = segs->next; in r8152_csum_workaround()
1402 } while (segs); in r8152_csum_workaround()
/linux-4.4.14/drivers/net/ethernet/intel/e1000e/
e1000.h:158 unsigned int segs; member
netdev.c:1242 total_tx_packets += buffer_info->segs; in e1000_clean_tx_irq()
5469 unsigned int f, bytecount, segs; in e1000_tx_map() local
5529 segs = skb_shinfo(skb)->gso_segs ? : 1; in e1000_tx_map()
5531 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; in e1000_tx_map()
5534 tx_ring->buffer_info[i].segs = segs; in e1000_tx_map()
/linux-4.4.14/drivers/net/ethernet/intel/igbvf/
netdev.c:827 unsigned int segs, bytecount; in igbvf_clean_tx_irq() local
830 segs = skb_shinfo(skb)->gso_segs ?: 1; in igbvf_clean_tx_irq()
832 bytecount = ((segs - 1) * skb_headlen(skb)) + in igbvf_clean_tx_irq()
834 total_packets += segs; in igbvf_clean_tx_irq()
/linux-4.4.14/net/mac80211/
tx.c:2877 struct sk_buff *segs; in __ieee80211_subif_start_xmit() local
2879 segs = skb_gso_segment(skb, 0); in __ieee80211_subif_start_xmit()
2880 if (IS_ERR(segs)) { in __ieee80211_subif_start_xmit()
2882 } else if (segs) { in __ieee80211_subif_start_xmit()
2884 skb = segs; in __ieee80211_subif_start_xmit()
/linux-4.4.14/arch/mips/cavium-octeon/executive/
cvmx-helper.c:895 pko_command.s.segs = num_segs; in __cvmx_helper_errata_fix_ipd_ptr_alignment()
/linux-4.4.14/drivers/net/ethernet/realtek/
r8169.c:6943 struct sk_buff *segs, *nskb; in r8169_csum_workaround() local
6946 segs = skb_gso_segment(skb, features); in r8169_csum_workaround()
6947 if (IS_ERR(segs) || !segs) in r8169_csum_workaround()
6951 nskb = segs; in r8169_csum_workaround()
6952 segs = segs->next; in r8169_csum_workaround()
6955 } while (segs); in r8169_csum_workaround()
/linux-4.4.14/drivers/scsi/aic7xxx/
aic7xxx_core.c:4713 ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) in ahc_dmamap_cb() argument
4718 *baddr = segs->ds_addr; in ahc_dmamap_cb()
4971 struct ahc_dma_seg *segs; in ahc_alloc_scbs() local
5001 segs = sg_map->sg_vaddr; in ahc_alloc_scbs()
5016 next_scb->sg_list = segs; in ahc_alloc_scbs()
5034 segs += AHC_NSEG; in ahc_alloc_scbs()
aic79xx_core.c:6382 ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) in ahd_dmamap_cb() argument
6387 *baddr = segs->ds_addr; in ahd_dmamap_cb()
6814 uint8_t *segs; in ahd_alloc_scbs() local
6865 segs = sg_map->vaddr + offset; in ahd_alloc_scbs()
6887 segs = sg_map->vaddr; in ahd_alloc_scbs()
6956 next_scb->sg_list = segs; in ahd_alloc_scbs()
6992 segs += ahd_sglist_size(ahd); in ahd_alloc_scbs()
aic7xxx.seq:735 /* Did we just finish fetching segs? */
aic79xx.seq:1475 /* Did we just finish fetching segs? */
/linux-4.4.14/fs/cifs/
connect.c:560 unsigned int segs; in cifs_readv_from_socket() local
579 segs = kvec_array_init(iov, iov_orig, nr_segs, total_read); in cifs_readv_from_socket()
582 iov, segs, to_read, 0); in cifs_readv_from_socket()
/linux-4.4.14/drivers/scsi/qla2xxx/
qla_os.c:5323 { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
5324 { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
5325 { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
5326 { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
qla_def.h:2580 uint32_t segs[4]; member
qla_init.c:5557 seg = blob->segs; in qla2x00_load_risc()
/linux-4.4.14/drivers/net/ethernet/broadcom/bnxt/
bnxt.c:2919 u32 nsegs, n, segs = 0, flags; in bnxt_hwrm_vnic_set_tpa() local
2948 segs = ilog2(nsegs); in bnxt_hwrm_vnic_set_tpa()
2949 req.max_agg_segs = cpu_to_le16(segs); in bnxt_hwrm_vnic_set_tpa()
/linux-4.4.14/drivers/net/ethernet/broadcom/
tg3.c:7852 struct sk_buff *segs, *nskb; in tg3_tso_bug() local
7871 segs = skb_gso_segment(skb, tp->dev->features & in tg3_tso_bug()
7873 if (IS_ERR(segs) || !segs) in tg3_tso_bug()
7877 nskb = segs; in tg3_tso_bug()
7878 segs = segs->next; in tg3_tso_bug()
7881 } while (segs); in tg3_tso_bug()