segs 886 arch/mips/cavium-octeon/executive/cvmx-helper.c pko_command.s.segs = num_segs;
segs 121 arch/mips/include/asm/octeon/cvmx-pko-defs.h uint64_t segs:6;
segs 125 arch/mips/include/asm/octeon/cvmx-pko-defs.h uint64_t segs:6;
segs 164 arch/mips/include/asm/octeon/cvmx-pko-defs.h uint64_t segs:6;
segs 168 arch/mips/include/asm/octeon/cvmx-pko-defs.h uint64_t segs:6;
segs 263 arch/mips/include/asm/octeon/cvmx-pko-defs.h uint64_t segs:6;
segs 267 arch/mips/include/asm/octeon/cvmx-pko-defs.h uint64_t segs:6;
segs 242 arch/mips/include/asm/octeon/cvmx-pko.h uint64_t segs:6;
segs 247 arch/mips/include/asm/octeon/cvmx-pko.h uint64_t segs:6;
segs 2178 arch/powerpc/platforms/powernv/pci-ioda.c unsigned int tce32_segsz, base, segs, avail, i;
segs 2191 arch/powerpc/platforms/powernv/pci-ioda.c segs = (weight * phb->ioda.dma32_count) / total_weight;
segs 2192 arch/powerpc/platforms/powernv/pci-ioda.c if (!segs)
segs 2193 arch/powerpc/platforms/powernv/pci-ioda.c segs = 1;
segs 2202 arch/powerpc/platforms/powernv/pci-ioda.c for (base = 0; base <= phb->ioda.dma32_count - segs; base++) {
segs 2203 arch/powerpc/platforms/powernv/pci-ioda.c for (avail = 0, i = base; i < base + segs; i++) {
segs 2209 arch/powerpc/platforms/powernv/pci-ioda.c if (avail == segs)
segs 2212 arch/powerpc/platforms/powernv/pci-ioda.c } while (--segs);
segs 2214 arch/powerpc/platforms/powernv/pci-ioda.c if (!segs) {
segs 2230 arch/powerpc/platforms/powernv/pci-ioda.c weight, total_weight, base, segs);
segs 2233 arch/powerpc/platforms/powernv/pci-ioda.c (base + segs) * PNV_IODA1_DMA32_SEGSIZE - 1);
segs 2245 arch/powerpc/platforms/powernv/pci-ioda.c get_order(tce32_segsz * segs));
segs 2251 arch/powerpc/platforms/powernv/pci-ioda.c memset(addr, 0, tce32_segsz * segs);
segs 2254 arch/powerpc/platforms/powernv/pci-ioda.c for (i = 0; i < segs; i++) {
segs 2268 arch/powerpc/platforms/powernv/pci-ioda.c for (i = base; i < base + segs; i++)
segs 2272 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_setup_iommu_table(tbl, addr, tce32_segsz * segs,
segs 2288 arch/powerpc/platforms/powernv/pci-ioda.c __free_pages(tce_mem, get_order(tce32_segsz * segs));
segs 2652 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
segs 2653 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
segs 2654 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
segs 2655 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
segs 2656 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
segs 2657 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
segs 2663 arch/x86/kvm/vmx/vmx.c vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
segs 2675 arch/x86/kvm/vmx/vmx.c fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
segs 2676 arch/x86/kvm/vmx/vmx.c fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
segs 2677 arch/x86/kvm/vmx/vmx.c fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
segs 2678 arch/x86/kvm/vmx/vmx.c fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
segs 2679 arch/x86/kvm/vmx/vmx.c fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
segs 2680 arch/x86/kvm/vmx/vmx.c fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
segs 2722 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
segs 2723 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
segs 2724 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
segs 2725 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
segs 2726 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
segs 2727 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
segs 2728 arch/x86/kvm/vmx/vmx.c vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
segs 2755 arch/x86/kvm/vmx/vmx.c fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
segs 2756 arch/x86/kvm/vmx/vmx.c fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
segs 2757 arch/x86/kvm/vmx/vmx.c fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
segs 2758 arch/x86/kvm/vmx/vmx.c fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
segs 2759 arch/x86/kvm/vmx/vmx.c fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
segs 2760 arch/x86/kvm/vmx/vmx.c fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
segs 3085 arch/x86/kvm/vmx/vmx.c *var = vmx->rmode.segs[seg];
segs 3166 arch/x86/kvm/vmx/vmx.c vmx->rmode.segs[seg] = *var;
segs 3170 arch/x86/kvm/vmx/vmx.c fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
segs 236 arch/x86/kvm/vmx/vmx.h struct kvm_segment segs[8];
segs 243 block/blk-merge.c unsigned *segs)
segs 273 block/blk-merge.c *segs = nsegs;
segs 276 block/blk-merge.c *segs = nsegs;
segs 1247 drivers/block/xen-blkfront.c int i, j, segs;
segs 1288 drivers/block/xen-blkfront.c segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
segs 1291 drivers/block/xen-blkfront.c for (j = 0; j < segs; j++) {
segs 1306 drivers/block/xen-blkfront.c for (j = 0; j < INDIRECT_GREFS(segs); j++) {
segs 2022 drivers/block/xen-blkfront.c unsigned int segs;
segs 2027 drivers/block/xen-blkfront.c segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
segs 2028 drivers/block/xen-blkfront.c blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
segs 2053 drivers/block/xen-blkfront.c BUG_ON(req->nr_phys_segments > segs);
segs 1104 drivers/infiniband/hw/hfi1/tid_rdma.c sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
segs 1105 drivers/infiniband/hw/hfi1/tid_rdma.c sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
segs 192 drivers/infiniband/hw/qib/qib_ud.c sge->mr->map[sge->m]->segs[sge->n].vaddr;
segs 194 drivers/infiniband/hw/qib/qib_ud.c sge->mr->map[sge->m]->segs[sge->n].length;
segs 168 drivers/infiniband/hw/qib/qib_verbs.c sge.mr->map[sge.m]->segs[sge.n].vaddr;
segs 170 drivers/infiniband/hw/qib/qib_verbs.c sge.mr->map[sge.m]->segs[sge.n].length;
segs 201 drivers/infiniband/hw/qib/qib_verbs.c sge->mr->map[sge->m]->segs[sge->n].vaddr;
segs 203 drivers/infiniband/hw/qib/qib_verbs.c sge->mr->map[sge->m]->segs[sge->n].length;
segs 423 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].vaddr = vaddr;
segs 424 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].length = PAGE_SIZE;
segs 613 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].vaddr = (void *)addr;
segs 614 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].length = ps;
segs 643 drivers/infiniband/sw/rdmavt/mr.c mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
segs 811 drivers/infiniband/sw/rdmavt/mr.c fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
segs 812 drivers/infiniband/sw/rdmavt/mr.c fmr->mr.map[m]->segs[n].length = ps;
segs 985 drivers/infiniband/sw/rdmavt/mr.c while (off >= mr->map[m]->segs[n].length) {
segs 986 drivers/infiniband/sw/rdmavt/mr.c off -= mr->map[m]->segs[n].length;
segs 995 drivers/infiniband/sw/rdmavt/mr.c isge->vaddr = mr->map[m]->segs[n].vaddr + off;
segs 996 drivers/infiniband/sw/rdmavt/mr.c isge->length = mr->map[m]->segs[n].length - off;
segs 1092 drivers/infiniband/sw/rdmavt/mr.c while (off >= mr->map[m]->segs[n].length) {
segs 1093 drivers/infiniband/sw/rdmavt/mr.c off -= mr->map[m]->segs[n].length;
segs 1102 drivers/infiniband/sw/rdmavt/mr.c sge->vaddr = mr->map[m]->segs[n].vaddr + off;
segs 1103 drivers/infiniband/sw/rdmavt/mr.c sge->length = mr->map[m]->segs[n].length - off;
segs 1436 drivers/media/dvb-frontends/mb86a20s.c const struct linear_segments *segs;
segs 1488 drivers/media/dvb-frontends/mb86a20s.c segs = cnr_qpsk_table;
segs 1492 drivers/media/dvb-frontends/mb86a20s.c segs = cnr_16qam_table;
segs 1497 drivers/media/dvb-frontends/mb86a20s.c segs = cnr_64qam_table;
segs 1501 drivers/media/dvb-frontends/mb86a20s.c cnr = interpolate_value(mer, segs, segs_len);
segs 1469 drivers/net/ethernet/broadcom/bnxt/bnxt.c u16 segs;
segs 1471 drivers/net/ethernet/broadcom/bnxt/bnxt.c segs = TPA_END_TPA_SEGS(tpa_end);
segs 1472 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (segs == 1)
segs 1475 drivers/net/ethernet/broadcom/bnxt/bnxt.c NAPI_GRO_CB(skb)->count = segs;
segs 4751 drivers/net/ethernet/broadcom/bnxt/bnxt.c u32 nsegs, n, segs = 0, flags;
segs 4782 drivers/net/ethernet/broadcom/bnxt/bnxt.c segs = MAX_TPA_SEGS_P5;
segs 4785 drivers/net/ethernet/broadcom/bnxt/bnxt.c segs = ilog2(nsegs);
segs 4787 drivers/net/ethernet/broadcom/bnxt/bnxt.c req.max_agg_segs = cpu_to_le16(segs);
segs 3048 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c info->segs = le16_to_cpu(*((__le16 *)(resp +
segs 3050 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c if (!info->segs) {
segs 3055 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c info->dest_buf_size = info->segs *
segs 3108 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c coredump->total_segs = info.segs;
segs 42 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h u16 segs;
segs 7877 drivers/net/ethernet/broadcom/tg3.c struct sk_buff *segs, *nskb;
segs 7896 drivers/net/ethernet/broadcom/tg3.c segs = skb_gso_segment(skb, tp->dev->features &
segs 7898 drivers/net/ethernet/broadcom/tg3.c if (IS_ERR(segs) || !segs)
segs 7902 drivers/net/ethernet/broadcom/tg3.c nskb = segs;
segs 7903 drivers/net/ethernet/broadcom/tg3.c segs = segs->next;
segs 7906 drivers/net/ethernet/broadcom/tg3.c } while (segs);
segs 132 drivers/net/ethernet/intel/e1000/e1000.h unsigned short segs;
segs 2830 drivers/net/ethernet/intel/e1000/e1000_main.c unsigned int f, bytecount, segs;
segs 2938 drivers/net/ethernet/intel/e1000/e1000_main.c segs = skb_shinfo(skb)->gso_segs ?: 1;
segs 2940 drivers/net/ethernet/intel/e1000/e1000_main.c bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
segs 2943 drivers/net/ethernet/intel/e1000/e1000_main.c tx_ring->buffer_info[i].segs = segs;
segs 3846 drivers/net/ethernet/intel/e1000/e1000_main.c total_tx_packets += buffer_info->segs;
segs 136 drivers/net/ethernet/intel/e1000e/e1000.h unsigned int segs;
segs 1235 drivers/net/ethernet/intel/e1000e/netdev.c total_tx_packets += buffer_info->segs;
segs 5563 drivers/net/ethernet/intel/e1000e/netdev.c unsigned int f, bytecount, segs;
segs 5622 drivers/net/ethernet/intel/e1000e/netdev.c segs = skb_shinfo(skb)->gso_segs ? : 1;
segs 5624 drivers/net/ethernet/intel/e1000e/netdev.c bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
segs 5627 drivers/net/ethernet/intel/e1000e/netdev.c tx_ring->buffer_info[i].segs = segs;
segs 805 drivers/net/ethernet/intel/igbvf/netdev.c unsigned int segs, bytecount;
segs 808 drivers/net/ethernet/intel/igbvf/netdev.c segs = skb_shinfo(skb)->gso_segs ?: 1;
segs 810 drivers/net/ethernet/intel/igbvf/netdev.c bytecount = ((segs - 1) * skb_headlen(skb)) +
segs 812 drivers/net/ethernet/intel/igbvf/netdev.c total_packets += segs;
segs 2895 drivers/net/ethernet/myricom/myri10ge/myri10ge.c struct sk_buff *segs, *curr;
segs 2900 drivers/net/ethernet/myricom/myri10ge/myri10ge.c segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
segs 2901 drivers/net/ethernet/myricom/myri10ge/myri10ge.c if (IS_ERR(segs))
segs 2904 drivers/net/ethernet/myricom/myri10ge/myri10ge.c while (segs) {
segs 2905 drivers/net/ethernet/myricom/myri10ge/myri10ge.c curr = segs;
segs 2906 drivers/net/ethernet/myricom/myri10ge/myri10ge.c segs = segs->next;
segs 2911 drivers/net/ethernet/myricom/myri10ge/myri10ge.c if (segs != NULL) {
segs 2912 drivers/net/ethernet/myricom/myri10ge/myri10ge.c curr = segs;
segs 2913 drivers/net/ethernet/myricom/myri10ge/myri10ge.c segs = segs->next;
segs 2915 drivers/net/ethernet/myricom/myri10ge/myri10ge.c dev_kfree_skb_any(segs);
segs 347 drivers/net/ethernet/qlogic/qed/qed_cxt.c struct qed_tid_seg *segs = p_cfg->tid_seg;
segs 353 drivers/net/ethernet/qlogic/qed/qed_cxt.c iids->pf_tids[j] += segs[j].count;
segs 359 drivers/net/ethernet/qlogic/qed/qed_cxt.c iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
segs 377 drivers/net/ethernet/qlogic/qed/qed_cxt.c struct qed_tid_seg *segs;
segs 385 drivers/net/ethernet/qlogic/qed/qed_cxt.c segs = p_mngr->conn_cfg[type].tid_seg;
segs 390 drivers/net/ethernet/qlogic/qed/qed_cxt.c iids->tids += segs[j].count;
segs 396 drivers/net/ethernet/qlogic/qed/qed_cxt.c vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
segs 1226 drivers/net/ethernet/sun/sunvnet_common.c struct sk_buff *segs;
segs 1277 drivers/net/ethernet/sun/sunvnet_common.c segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
segs 1278 drivers/net/ethernet/sun/sunvnet_common.c if (IS_ERR(segs))
segs 1285 drivers/net/ethernet/sun/sunvnet_common.c while (segs) {
segs 1286 drivers/net/ethernet/sun/sunvnet_common.c struct sk_buff *curr = segs;
segs 1288 drivers/net/ethernet/sun/sunvnet_common.c segs = segs->next;
segs 343 drivers/net/tap.c struct sk_buff *segs = __skb_gso_segment(skb, features, false);
segs 345 drivers/net/tap.c if (IS_ERR(segs))
segs 348 drivers/net/tap.c if (!segs) {
segs 355 drivers/net/tap.c while (segs) {
segs 356 drivers/net/tap.c struct sk_buff *nskb = segs->next;
segs 358 drivers/net/tap.c segs->next = NULL;
segs 359 drivers/net/tap.c if (ptr_ring_produce(&q->ring, segs)) {
segs 360 drivers/net/tap.c kfree_skb(segs);
segs 364 drivers/net/tap.c segs = nskb;
segs 1714 drivers/net/usb/r8152.c struct sk_buff *segs, *nskb;
segs 1717 drivers/net/usb/r8152.c segs = skb_gso_segment(skb, features);
segs 1718 drivers/net/usb/r8152.c if (IS_ERR(segs) || !segs)
segs 1724 drivers/net/usb/r8152.c nskb = segs;
segs 1725 drivers/net/usb/r8152.c segs = segs->next;
segs 1728 drivers/net/usb/r8152.c } while (segs);
segs 6368 drivers/scsi/aic7xxx/aic79xx_core.c ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
segs 6373 drivers/scsi/aic7xxx/aic79xx_core.c *baddr = segs->ds_addr;
segs 6800 drivers/scsi/aic7xxx/aic79xx_core.c uint8_t *segs;
segs 6851 drivers/scsi/aic7xxx/aic79xx_core.c segs = sg_map->vaddr + offset;
segs 6873 drivers/scsi/aic7xxx/aic79xx_core.c segs = sg_map->vaddr;
segs 6939 drivers/scsi/aic7xxx/aic79xx_core.c next_scb->sg_list = segs;
segs 6966 drivers/scsi/aic7xxx/aic79xx_core.c segs += ahd_sglist_size(ahd);
segs 4700 drivers/scsi/aic7xxx/aic7xxx_core.c ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
segs 4705 drivers/scsi/aic7xxx/aic7xxx_core.c *baddr = segs->ds_addr;
segs 4964 drivers/scsi/aic7xxx/aic7xxx_core.c struct ahc_dma_seg *segs;
segs 4994 drivers/scsi/aic7xxx/aic7xxx_core.c segs = sg_map->sg_vaddr;
segs 5007 drivers/scsi/aic7xxx/aic7xxx_core.c next_scb->sg_list = segs;
segs 5019 drivers/scsi/aic7xxx/aic7xxx_core.c segs += AHC_NSEG;
segs 3121 drivers/scsi/qla2xxx/qla_def.h uint32_t segs[4];
segs 7820 drivers/scsi/qla2xxx/qla_init.c seg = blob->segs;
segs 6760 drivers/scsi/qla2xxx/qla_os.c { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
segs 6761 drivers/scsi/qla2xxx/qla_os.c { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
segs 6762 drivers/scsi/qla2xxx/qla_os.c { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
segs 6763 drivers/scsi/qla2xxx/qla_os.c { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
segs 3908 drivers/scsi/st.c int segs, max_segs, b_size, order, got;
segs 3944 drivers/scsi/st.c for (segs = STbuffer->frp_segs, got = STbuffer->buffer_size;
segs 3945 drivers/scsi/st.c segs < max_segs && got < new_size;) {
segs 3958 drivers/scsi/st.c STbuffer->reserved_pages[segs] = page;
segs 3959 drivers/scsi/st.c segs++;
segs 365 drivers/scsi/vmw_pvscsi.c int segs = scsi_dma_map(cmd);
segs 367 drivers/scsi/vmw_pvscsi.c if (segs == -ENOMEM) {
segs 371 drivers/scsi/vmw_pvscsi.c } else if (segs > 1) {
segs 372 drivers/scsi/vmw_pvscsi.c pvscsi_create_sg(ctx, sg, segs);
segs 254 drivers/staging/octeon/ethernet-tx.c pko_command.s.segs = 1;
segs 283 drivers/staging/octeon/ethernet-tx.c pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
segs 1144 drivers/staging/octeon/octeon-stubs.h uint64_t segs:6;
segs 143 drivers/staging/wusbcore/wa-xfer.c u8 segs, segs_submitted, segs_done;
segs 177 drivers/staging/wusbcore/wa-xfer.c for (cnt = 0; cnt < xfer->segs; cnt++) {
segs 335 drivers/staging/wusbcore/wa-xfer.c for (cnt = 0; cnt < xfer->segs; cnt++) {
segs 349 drivers/staging/wusbcore/wa-xfer.c && cnt != xfer->segs-1)
segs 455 drivers/staging/wusbcore/wa-xfer.c while (seg_index < xfer->segs) {
segs 626 drivers/staging/wusbcore/wa-xfer.c xfer->segs = 0;
segs 635 drivers/staging/wusbcore/wa-xfer.c ++xfer->segs;
segs 638 drivers/staging/wusbcore/wa-xfer.c xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
segs 640 drivers/staging/wusbcore/wa-xfer.c if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
segs 641 drivers/staging/wusbcore/wa-xfer.c xfer->segs = 1;
segs 644 drivers/staging/wusbcore/wa-xfer.c if (xfer->segs > WA_SEGS_MAX) {
segs 1170 drivers/staging/wusbcore/wa-xfer.c xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
segs 1175 drivers/staging/wusbcore/wa-xfer.c for (cnt = 0; cnt < xfer->segs; cnt++) {
segs 1311 drivers/staging/wusbcore/wa-xfer.c xfer, xfer->segs, result);
segs 1324 drivers/staging/wusbcore/wa-xfer.c for (cnt = 1; cnt < xfer->segs; cnt++) {
segs 1352 drivers/staging/wusbcore/wa-xfer.c for (cnt = 1; cnt < xfer->segs; cnt++) {
segs 1561 drivers/staging/wusbcore/wa-xfer.c for (cnt = 0; cnt < xfer->segs; cnt++) {
segs 1980 drivers/staging/wusbcore/wa-xfer.c for (cnt = 0; cnt < xfer->segs; cnt++) {
segs 2293 drivers/staging/wusbcore/wa-xfer.c if (unlikely(seg_idx >= xfer->segs))
segs 2459 drivers/staging/wusbcore/wa-xfer.c if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
segs 1426 fs/binfmt_elf.c static void fill_elf_header(struct elfhdr *elf, int segs,
segs 1444 fs/binfmt_elf.c elf->e_phnum = segs;
segs 2162 fs/binfmt_elf.c elf_addr_t e_shoff, int segs)
segs 2174 fs/binfmt_elf.c shdr4extnum->sh_info = segs;
segs 2188 fs/binfmt_elf.c int segs, i;
segs 2220 fs/binfmt_elf.c segs = current->mm->map_count;
segs 2221 fs/binfmt_elf.c segs += elf_core_extra_phdrs();
segs 2225 fs/binfmt_elf.c segs++;
segs 2228 fs/binfmt_elf.c segs++;
segs 2233 fs/binfmt_elf.c e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
segs 2248 fs/binfmt_elf.c offset += segs * sizeof(struct elf_phdr); /* Program headers */
segs 2266 fs/binfmt_elf.c if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
segs 2268 fs/binfmt_elf.c vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
segs 2290 fs/binfmt_elf.c fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
segs 764 fs/binfmt_elf_fdpic.c seg = loadmap->segs;
segs 785 fs/binfmt_elf_fdpic.c seg = loadmap->segs;
segs 810 fs/binfmt_elf_fdpic.c seg = loadmap->segs;
segs 831 fs/binfmt_elf_fdpic.c seg = loadmap->segs;
segs 867 fs/binfmt_elf_fdpic.c mseg = loadmap->segs;
segs 895 fs/binfmt_elf_fdpic.c seg = loadmap->segs;
segs 926 fs/binfmt_elf_fdpic.c seg = params->loadmap->segs;
segs 1017 fs/binfmt_elf_fdpic.c seg = params->loadmap->segs;
segs 1290 fs/binfmt_elf_fdpic.c static inline void fill_elf_fdpic_header(struct elfhdr *elf, int segs)
segs 1308 fs/binfmt_elf_fdpic.c elf->e_phnum = segs;
segs 1475 fs/binfmt_elf_fdpic.c elf_addr_t e_shoff, int segs)
segs 1487 fs/binfmt_elf_fdpic.c shdr4extnum->sh_info = segs;
segs 1553 fs/binfmt_elf_fdpic.c int segs;
segs 1635 fs/binfmt_elf_fdpic.c segs = current->mm->map_count;
segs 1636 fs/binfmt_elf_fdpic.c segs += elf_core_extra_phdrs();
segs 1639 fs/binfmt_elf_fdpic.c segs++;
segs 1644 fs/binfmt_elf_fdpic.c e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
segs 1685 fs/binfmt_elf_fdpic.c offset += segs * sizeof(struct elf_phdr); /* Program headers */
segs 1715 fs/binfmt_elf_fdpic.c fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
segs 2811 fs/btrfs/check-integrity.c unsigned int segs = bio_segments(bio);
segs 2818 fs/btrfs/check-integrity.c bio_op(bio), bio->bi_opf, segs,
segs 2822 fs/btrfs/check-integrity.c mapped_datav = kmalloc_array(segs,
segs 2840 fs/btrfs/check-integrity.c mapped_datav, segs,
segs 8020 fs/btrfs/inode.c int segs;
segs 8038 fs/btrfs/inode.c segs = bio_segments(failed_bio);
segs 8040 fs/btrfs/inode.c if (segs > 1 ||
segs 1920 fs/f2fs/f2fs.h unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
segs 1923 fs/f2fs/f2fs.h return segs / sbi->segs_per_sec;
segs 1433 fs/f2fs/gc.c int segs = secs * sbi->segs_per_sec;
segs 1436 fs/f2fs/gc.c raw_sb->segment_count = cpu_to_le32(segment_count + segs);
segs 1437 fs/f2fs/gc.c raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
segs 1439 fs/f2fs/gc.c (long long)segs * sbi->blocks_per_seg);
segs 1444 fs/f2fs/gc.c int segs = secs * sbi->segs_per_sec;
segs 1448 fs/f2fs/gc.c SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
segs 1449 fs/f2fs/gc.c MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
segs 1451 fs/f2fs/gc.c FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
segs 1453 fs/f2fs/gc.c (long long)segs * sbi->blocks_per_seg);
segs 176 include/linux/bio.h unsigned segs = 0;
segs 197 include/linux/bio.h segs++;
segs 199 include/linux/bio.h return segs;
segs 1571 include/linux/blkdev.h unsigned int segs)
segs 1573 include/linux/blkdev.h q->limits.max_integrity_segments = segs;
segs 1657 include/linux/blkdev.h unsigned int segs)
segs 1022 include/linux/skbuff.h void kfree_skb_list(struct sk_buff *segs);
segs 494 include/net/ip.h u32 ip_idents_reserve(u32 hash, int segs);
segs 495 include/net/ip.h void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
segs 498 include/net/ip.h struct sock *sk, int segs)
segs 510 include/net/ip.h inet_sk(sk)->inet_id += segs;
segs 515 include/net/ip.h __ip_select_ident(net, iph, segs);
segs 2332 include/net/sock.h int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
segs 2334 include/net/sock.h atomic_add(segs, &sk->sk_drops);
segs 570 include/net/tcp.h int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
segs 571 include/net/tcp.h int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
segs 953 include/net/tcp.h static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
segs 955 include/net/tcp.h TCP_SKB_CB(skb)->tcp_gso_segs = segs;
segs 958 include/net/tcp.h static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
segs 960 include/net/tcp.h TCP_SKB_CB(skb)->tcp_gso_segs += segs;
segs 471 include/net/udp.h struct sk_buff *segs;
segs 492 include/net/udp.h segs = __skb_gso_segment(skb, features, false);
segs 493 include/net/udp.h if (IS_ERR_OR_NULL(segs)) {
segs 503 include/net/udp.h return segs;
segs 70 include/rdma/rdmavt_mr.h struct rvt_seg segs[RVT_SEGSZ];
segs 175 include/rdma/rdmavt_mr.h sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
segs 176 include/rdma/rdmavt_mr.h sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
segs 30 include/uapi/linux/elf-fdpic.h struct elf32_fdpic_loadseg segs[];
segs 6865 lib/test_bpf.c struct sk_buff *skb, *segs;
segs 6877 lib/test_bpf.c segs = skb_segment(skb, features);
segs 6878 lib/test_bpf.c if (!IS_ERR(segs)) {
segs 6879 lib/test_bpf.c kfree_skb_list(segs);
segs 2944 net/core/dev.c struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
segs 2957 net/core/dev.c segs = ptype->callbacks.gso_segment(skb, features);
segs 2965 net/core/dev.c return segs;
segs 2997 net/core/dev.c struct sk_buff *segs;
segs 3030 net/core/dev.c segs = skb_mac_gso_segment(skb, features);
segs 3032 net/core/dev.c if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
segs 3035 net/core/dev.c return segs;
segs 3268 net/core/dev.c struct sk_buff *segs;
segs 3270 net/core/dev.c segs = skb_gso_segment(skb, features);
segs 3271 net/core/dev.c if (IS_ERR(segs)) {
segs 3273 net/core/dev.c } else if (segs) {
segs 3275 net/core/dev.c skb = segs;
segs 701 net/core/skbuff.c void kfree_skb_list(struct sk_buff *segs)
segs 703 net/core/skbuff.c while (segs) {
segs 704 net/core/skbuff.c struct sk_buff *next = segs->next;
segs 706 net/core/skbuff.c kfree_skb(segs);
segs 707 net/core/skbuff.c segs = next;
segs 3653 net/core/skbuff.c struct sk_buff *segs = NULL;
segs 3820 net/core/skbuff.c if (segs)
segs 3823 net/core/skbuff.c segs = nskb;
segs 3942 net/core/skbuff.c segs->prev = tail;
segs 3956 net/core/skbuff.c for (iter = segs; iter; iter = iter->next) {
segs 3965 net/core/skbuff.c else if (tail != segs)
segs 3978 net/core/skbuff.c return segs;
segs 3981 net/core/skbuff.c kfree_skb_list(segs);
segs 1301 net/ipv4/af_inet.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 1335 net/ipv4/af_inet.c segs = ERR_PTR(-EPROTONOSUPPORT);
segs 1348 net/ipv4/af_inet.c segs = ops->callbacks.gso_segment(skb, features);
segs 1350 net/ipv4/af_inet.c if (IS_ERR_OR_NULL(segs))
segs 1353 net/ipv4/af_inet.c gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
segs 1355 net/ipv4/af_inet.c skb = segs;
segs 1390 net/ipv4/af_inet.c return segs;
segs 122 net/ipv4/esp4_offload.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 128 net/ipv4/esp4_offload.c segs = ops->callbacks.gso_segment(skb, features);
segs 130 net/ipv4/esp4_offload.c return segs;
segs 18 net/ipv4/gre_offload.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 49 net/ipv4/gre_offload.c segs = skb_mac_gso_segment(skb, features);
segs 50 net/ipv4/gre_offload.c if (IS_ERR_OR_NULL(segs)) {
segs 56 net/ipv4/gre_offload.c gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
segs 60 net/ipv4/gre_offload.c skb = segs;
segs 104 net/ipv4/gre_offload.c return segs;
segs 244 net/ipv4/ip_output.c struct sk_buff *segs;
segs 267 net/ipv4/ip_output.c segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
segs 268 net/ipv4/ip_output.c if (IS_ERR_OR_NULL(segs)) {
segs 276 net/ipv4/ip_output.c struct sk_buff *nskb = segs->next;
segs 279 net/ipv4/ip_output.c skb_mark_not_on_list(segs);
segs 280 net/ipv4/ip_output.c err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
segs 284 net/ipv4/ip_output.c segs = nskb;
segs 285 net/ipv4/ip_output.c } while (segs);
segs 487 net/ipv4/route.c u32 ip_idents_reserve(u32 hash, int segs)
segs 502 net/ipv4/route.c return atomic_add_return(segs + delta, p_id) - segs;
segs 506 net/ipv4/route.c void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
segs 519 net/ipv4/route.c id = ip_idents_reserve(hash, segs);
segs 304 net/ipv4/tcp_bbr.c u32 segs, bytes;
segs 312 net/ipv4/tcp_bbr.c segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
segs 314 net/ipv4/tcp_bbr.c return min(segs, 0x7FU);
segs 57 net/ipv4/tcp_offload.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 89 net/ipv4/tcp_offload.c segs = NULL;
segs 98 net/ipv4/tcp_offload.c segs = skb_segment(skb, features);
segs 99 net/ipv4/tcp_offload.c if (IS_ERR(segs))
segs 103 net/ipv4/tcp_offload.c segs->ooo_okay = ooo_okay;
segs 109 net/ipv4/tcp_offload.c if (skb_is_gso(segs))
segs 110 net/ipv4/tcp_offload.c mss *= skb_shinfo(segs)->gso_segs;
segs 114 net/ipv4/tcp_offload.c skb = segs;
segs 119 net/ipv4/tcp_offload.c tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
segs 177 net/ipv4/tcp_offload.c return segs;
segs 1732 net/ipv4/tcp_output.c u32 bytes, segs;
segs 1743 net/ipv4/tcp_output.c segs = max_t(u32, bytes / mss_now, min_tso_segs);
segs 1745 net/ipv4/tcp_output.c return segs;
segs 2897 net/ipv4/tcp_output.c int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
segs 2943 net/ipv4/tcp_output.c len = cur_mss * segs;
segs 2966 net/ipv4/tcp_output.c segs = tcp_skb_pcount(skb);
segs 2967 net/ipv4/tcp_output.c TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
segs 2970 net/ipv4/tcp_output.c tp->total_retrans += segs;
segs 3006 net/ipv4/tcp_output.c TCP_SKB_CB(skb)->seq, segs, err);
segs 3011 net/ipv4/tcp_output.c NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
segs 3016 net/ipv4/tcp_output.c int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
segs 3019 net/ipv4/tcp_output.c int err = __tcp_retransmit_skb(sk, skb, segs);
segs 3062 net/ipv4/tcp_output.c int segs;
segs 3071 net/ipv4/tcp_output.c segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
segs 3072 net/ipv4/tcp_output.c if (segs <= 0)
segs 3078 net/ipv4/tcp_output.c segs = min_t(int, segs, max_segs);
segs 3100 net/ipv4/tcp_output.c if (tcp_retransmit_skb(sk, skb, segs))
segs 2103 net/ipv4/udp.c struct sk_buff *next, *segs;
segs 2111 net/ipv4/udp.c segs = udp_rcv_segment(sk, skb, true);
segs 2112 net/ipv4/udp.c for (skb = segs; skb; skb = next) {
segs 22 net/ipv4/udp_offload.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 82 net/ipv4/udp_offload.c segs = gso_inner_segment(skb, features);
segs 83 net/ipv4/udp_offload.c if (IS_ERR_OR_NULL(segs)) {
segs 89 net/ipv4/udp_offload.c gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
segs 93 net/ipv4/udp_offload.c skb = segs;
segs 145 net/ipv4/udp_offload.c return segs;
segs 155 net/ipv4/udp_offload.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 177 net/ipv4/udp_offload.c segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
segs 183 net/ipv4/udp_offload.c return segs;
segs 192 net/ipv4/udp_offload.c struct sk_buff *segs, *seg;
segs 210 net/ipv4/udp_offload.c segs = skb_segment(gso_skb, features);
segs 211 net/ipv4/udp_offload.c if (IS_ERR_OR_NULL(segs)) {
segs 214 net/ipv4/udp_offload.c return segs;
segs 221 net/ipv4/udp_offload.c if (skb_is_gso(segs))
segs 222 net/ipv4/udp_offload.c mss *= skb_shinfo(segs)->gso_segs;
segs 224 net/ipv4/udp_offload.c seg = segs;
segs 284 net/ipv4/udp_offload.c return segs;
segs 291 net/ipv4/udp_offload.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 300 net/ipv4/udp_offload.c segs = skb_udp_tunnel_segment(skb, features, false);
segs 343 net/ipv4/udp_offload.c segs = skb_segment(skb, features);
segs 345 net/ipv4/udp_offload.c return segs;
segs 156 net/ipv6/esp6_offload.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 162 net/ipv6/esp6_offload.c segs = ops->callbacks.gso_segment(skb, features);
segs 164 net/ipv6/esp6_offload.c return segs;
segs 74 net/ipv6/ip6_offload.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 98 net/ipv6/ip6_offload.c segs = ERR_PTR(-EPROTONOSUPPORT);
segs 113 net/ipv6/ip6_offload.c segs = ops->callbacks.gso_segment(skb, features);
segs 116 net/ipv6/ip6_offload.c if (IS_ERR_OR_NULL(segs))
segs 119 net/ipv6/ip6_offload.c gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
segs 121 net/ipv6/ip6_offload.c for (skb = segs; skb; skb = skb->next) {
segs 136 net/ipv6/ip6_offload.c kfree_skb_list(segs);
segs 151 net/ipv6/ip6_offload.c return segs;
segs 685 net/ipv6/udp.c struct sk_buff *next, *segs;
segs 692 net/ipv6/udp.c segs = udp_rcv_segment(sk, skb, false);
segs 693 net/ipv6/udp.c for (skb = segs; skb; skb = next) {
segs 20 net/ipv6/udp_offload.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 37 net/ipv6/udp_offload.c segs = skb_udp_tunnel_segment(skb, features, true);
segs 107 net/ipv6/udp_offload.c segs = skb_segment(skb, features);
segs 111 net/ipv6/udp_offload.c return segs;
segs 3880 net/mac80211/tx.c struct sk_buff *segs;
segs 3882 net/mac80211/tx.c segs = skb_gso_segment(skb, 0);
segs 3883 net/mac80211/tx.c if (IS_ERR(segs)) {
segs 3885 net/mac80211/tx.c } else if (segs) {
segs 3887 net/mac80211/tx.c skb = segs;
segs 21 net/mpls/mpls_gso.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 44 net/mpls/mpls_gso.c segs = skb_mac_gso_segment(skb, mpls_features);
segs 45 net/mpls/mpls_gso.c if (IS_ERR_OR_NULL(segs)) {
segs 50 net/mpls/mpls_gso.c skb = segs;
segs 66 net/mpls/mpls_gso.c return segs;
segs 781 net/netfilter/nfnetlink_queue.c struct sk_buff *skb, *segs;
segs 809 net/netfilter/nfnetlink_queue.c segs = skb_gso_segment(skb, 0);
segs 814 net/netfilter/nfnetlink_queue.c if (IS_ERR_OR_NULL(segs))
segs 819 net/netfilter/nfnetlink_queue.c struct sk_buff *nskb = segs->next;
segs 822 net/netfilter/nfnetlink_queue.c segs, entry);
segs 826 net/netfilter/nfnetlink_queue.c kfree_skb(segs);
segs 827 net/netfilter/nfnetlink_queue.c segs = nskb;
segs 828 net/netfilter/nfnetlink_queue.c } while (segs);
segs 79 net/nsh/nsh.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 108 net/nsh/nsh.c segs = skb_mac_gso_segment(skb, features);
segs 109 net/nsh/nsh.c if (IS_ERR_OR_NULL(segs)) {
segs 116 net/nsh/nsh.c for (skb = segs; skb; skb = skb->next) {
segs 125 net/nsh/nsh.c return segs;
segs 303 net/openvswitch/datapath.c struct sk_buff *segs, *nskb;
segs 307 net/openvswitch/datapath.c segs = __skb_gso_segment(skb, NETIF_F_SG, false);
segs 308 net/openvswitch/datapath.c if (IS_ERR(segs))
segs 309 net/openvswitch/datapath.c return PTR_ERR(segs);
segs 310 net/openvswitch/datapath.c if (segs == NULL)
segs 323 net/openvswitch/datapath.c skb = segs;
segs 325 net/openvswitch/datapath.c if (gso_type & SKB_GSO_UDP && skb != segs)
segs 335 net/openvswitch/datapath.c skb = segs;
segs 1315 net/sched/sch_cake.c u16 segs = 1;
segs 1344 net/sched/sch_cake.c segs = DIV_ROUND_UP(skb->len - hdr_len,
segs 1347 net/sched/sch_cake.c segs = shinfo->gso_segs;
segs 1350 net/sched/sch_cake.c last_len = skb->len - shinfo->gso_size * (segs - 1);
segs 1352 net/sched/sch_cake.c return (cake_calc_overhead(q, len, off) * (segs - 1) +
segs 1678 net/sched/sch_cake.c struct sk_buff *segs, *nskb;
segs 1682 net/sched/sch_cake.c segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
segs 1683 net/sched/sch_cake.c if (IS_ERR_OR_NULL(segs))
segs 1686 net/sched/sch_cake.c while (segs) {
segs 1687 net/sched/sch_cake.c nskb = segs->next;
segs 1688 net/sched/sch_cake.c skb_mark_not_on_list(segs);
segs 1689 net/sched/sch_cake.c qdisc_skb_cb(segs)->pkt_len = segs->len;
segs 1690 net/sched/sch_cake.c cobalt_set_enqueue_time(segs, now);
segs 1691 net/sched/sch_cake.c get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
segs 1692 net/sched/sch_cake.c segs);
segs 1693 net/sched/sch_cake.c flow_queue_add(flow, segs);
segs 1697 net/sched/sch_cake.c slen += segs->len;
segs 1698 net/sched/sch_cake.c q->buffer_used += segs->truesize;
segs 1700 net/sched/sch_cake.c segs = nskb;
segs 415 net/sched/sch_netem.c struct sk_buff *segs;
segs 418 net/sched/sch_netem.c segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
segs 420 net/sched/sch_netem.c if (IS_ERR_OR_NULL(segs)) {
segs 425 net/sched/sch_netem.c return segs;
segs 441 net/sched/sch_netem.c struct sk_buff *segs = NULL;
segs 499 net/sched/sch_netem.c segs = skb->next;
segs 522 net/sched/sch_netem.c skb->next = segs;
segs 595 net/sched/sch_netem.c if (segs) {
segs 602 net/sched/sch_netem.c while (segs) {
segs 603 net/sched/sch_netem.c skb2 = segs->next;
segs 604 net/sched/sch_netem.c skb_mark_not_on_list(segs);
segs 605 net/sched/sch_netem.c qdisc_skb_cb(segs)->pkt_len = segs->len;
segs 606 net/sched/sch_netem.c last_len = segs->len;
segs 607 net/sched/sch_netem.c rc = qdisc_enqueue(segs, sch, to_free);
segs 615 net/sched/sch_netem.c segs = skb2;
segs 147 net/sched/sch_tbf.c struct sk_buff *segs, *nskb;
segs 152 net/sched/sch_tbf.c segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
segs 154 net/sched/sch_tbf.c if (IS_ERR_OR_NULL(segs))
segs 158 net/sched/sch_tbf.c while (segs) {
segs 159 net/sched/sch_tbf.c nskb = segs->next;
segs 160 net/sched/sch_tbf.c skb_mark_not_on_list(segs);
segs 161 net/sched/sch_tbf.c qdisc_skb_cb(segs)->pkt_len = segs->len;
segs 162 net/sched/sch_tbf.c len += segs->len;
segs 163 net/sched/sch_tbf.c ret = qdisc_enqueue(segs, q->qdisc, to_free);
segs 170 net/sched/sch_tbf.c segs = nskb;
segs 37 net/sctp/offload.c struct sk_buff *segs = ERR_PTR(-EINVAL);
segs 63 net/sctp/offload.c segs = NULL;
segs 67 net/sctp/offload.c segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
segs 68 net/sctp/offload.c if (IS_ERR(segs))
segs 73 net/sctp/offload.c for (skb = segs; skb; skb = skb->next) {
segs 82 net/sctp/offload.c return segs;
segs 110 net/xfrm/xfrm_device.c struct sk_buff *segs;
segs 116 net/xfrm/xfrm_device.c segs = skb_gso_segment(skb, esp_features);
segs 117 net/xfrm/xfrm_device.c if (IS_ERR(segs)) {
segs 123 net/xfrm/xfrm_device.c skb = segs;
segs 536 net/xfrm/xfrm_output.c struct sk_buff *segs;
segs 540 net/xfrm/xfrm_output.c segs = skb_gso_segment(skb, 0);
segs 542 net/xfrm/xfrm_output.c if (IS_ERR(segs))
segs 543 net/xfrm/xfrm_output.c return PTR_ERR(segs);
segs 544 net/xfrm/xfrm_output.c if (segs == NULL)
segs 548 net/xfrm/xfrm_output.c struct sk_buff *nskb = segs->next;
segs 551 net/xfrm/xfrm_output.c skb_mark_not_on_list(segs);
segs 552 net/xfrm/xfrm_output.c err = xfrm_output2(net, sk, segs);
segs 559 net/xfrm/xfrm_output.c segs = nskb;
segs 560 net/xfrm/xfrm_output.c } while (segs);