Searched refs:fragments (Results 1 – 63 of 63) sorted by relevance

/linux-4.1.27/drivers/net/wireless/ipw2x00/
libipw_tx.c:186 if (txb->fragments[i]) in libipw_txb_free()
187 dev_kfree_skb_any(txb->fragments[i]); in libipw_txb_free()
206 txb->fragments[i] = __dev_alloc_skb(txb_size + headroom, in libipw_alloc_txb()
208 if (unlikely(!txb->fragments[i])) { in libipw_alloc_txb()
212 skb_reserve(txb->fragments[i], headroom); in libipw_alloc_txb()
216 dev_kfree_skb_any(txb->fragments[i--]); in libipw_alloc_txb()
441 skb_frag = txb->fragments[0]; in libipw_xmit()
467 skb_frag = txb->fragments[i]; in libipw_xmit()
ipw2200.c:10121 txb->fragments[0]->data; in ipw_tx_skb()
10245 txb->fragments[i]->len - hdr_len); in ipw_tx_skb()
10248 txb->fragments[i]->len - hdr_len); in ipw_tx_skb()
10249 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len, in ipw_tx_skb()
10250 txb->fragments[i]->len - hdr_len); in ipw_tx_skb()
10255 txb->fragments[i]->data + hdr_len, in ipw_tx_skb()
10256 txb->fragments[i]->len - hdr_len, in ipw_tx_skb()
10259 cpu_to_le16(txb->fragments[i]->len - hdr_len); in ipw_tx_skb()
10268 remaining_bytes += txb->fragments[j]->len - hdr_len; in ipw_tx_skb()
10276 int size = txb->fragments[j]->len - hdr_len; in ipw_tx_skb()
[all …]
libipw.h:497 struct sk_buff *fragments[0]; member
ipw2100.c:3121 fragments[0]->data; in ipw2100_tx_send_data()
3177 fragments[i]->len - LIBIPW_3ADDR_LEN; in ipw2100_tx_send_data()
3181 txb->fragments[i]-> in ipw2100_tx_send_data()
3412 IPW_DEBUG_TX("Sending fragment (%d bytes):\n", txb->fragments[0]->len); in ipw2100_tx()
3413 printk_buf(IPW_DL_TX, txb->fragments[0]->data, txb->fragments[0]->len); in ipw2100_tx()
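
Every ipw2x00 hit above goes through the same transmit-buffer layout: libipw.h:497 declares a trailing struct sk_buff *fragments[0] array, the allocator sizes the buffer for however many fragment pointers it needs, and the tx paths index them as txb->fragments[i]. A minimal userspace sketch of that trailing-array pattern, using a hypothetical frag_buf stand-in for struct sk_buff and a C99 flexible array member in place of the kernel's zero-length array:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct sk_buff; purely illustrative. */
struct frag_buf {
	size_t len;
	unsigned char data[256];
};

/* Mirrors the shape of the txb structure: a fixed header plus a
 * trailing array of fragment pointers sized at allocation time. */
struct txb {
	int nr_frags;
	struct frag_buf *fragments[];	/* C99 flexible array member */
};

static struct txb *alloc_txb(int nr_frags, size_t frag_size)
{
	struct txb *txb;
	int i;

	txb = malloc(sizeof(*txb) + nr_frags * sizeof(txb->fragments[0]));
	if (!txb)
		return NULL;
	txb->nr_frags = nr_frags;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = calloc(1, sizeof(struct frag_buf));
		if (!txb->fragments[i]) {
			/* Unwind what was allocated so far, like the
			 * dev_kfree_skb_any(txb->fragments[i--]) cleanup
			 * in libipw_alloc_txb(). */
			while (--i >= 0)
				free(txb->fragments[i]);
			free(txb);
			return NULL;
		}
		txb->fragments[i]->len = frag_size;
	}
	return txb;
}

int main(void)
{
	struct txb *txb = alloc_txb(4, 128);
	int i;

	if (!txb)
		return 1;
	for (i = 0; i < txb->nr_frags; i++)
		printf("fragment %d: %zu bytes\n", i, txb->fragments[i]->len);
	for (i = 0; i < txb->nr_frags; i++)
		free(txb->fragments[i]);
	free(txb);
	return 0;
}
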
/linux-4.1.27/net/ipv4/
ip_fragment.c:196 struct sk_buff *head = qp->q.fragments; in ip_expire()
202 if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) in ip_expire()
274 rc = qp->q.fragments && (end - start) > max; in ip_frag_too_far()
296 fp = qp->q.fragments; in ip_frag_reinit()
309 qp->q.fragments = NULL; in ip_frag_reinit()
392 for (next = qp->q.fragments; next != NULL; next = next->next) { in ip_frag_queue()
446 qp->q.fragments = next; in ip_frag_queue()
463 qp->q.fragments = skb; in ip_frag_queue()
507 struct sk_buff *fp, *head = qp->q.fragments; in ip_frag_reasm()
533 skb_morph(head, qp->q.fragments); in ip_frag_reasm()
[all …]
inet_fragment.c:323 fp = q->fragments; in inet_frag_destroy()
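
The ip_fragment.c hits above treat q.fragments as the head of a singly linked, offset-ordered chain of skbs: ip_frag_queue() walks it to find the insertion point, ip_frag_reasm() collapses it into one packet, and inet_frag_destroy() frees whatever is left. A simplified userspace model of that offset-ordered chain (the frag type and helpers are illustrative, not the kernel structures; overlap and completeness checks are omitted):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for an sk_buff carrying one IP fragment. */
struct frag {
	unsigned int offset;	/* payload offset of this fragment    */
	unsigned int len;	/* payload length of this fragment    */
	struct frag *next;	/* singly linked, like skb->next here */
};

struct frag_queue {
	struct frag *fragments;	/* head of the chain, cf. q.fragments */
};

/* Insert keeping the chain sorted by offset, in the spirit of the
 * "for (next = qp->q.fragments; next != NULL; next = next->next)"
 * walk in ip_frag_queue(). */
static void frag_queue_insert(struct frag_queue *q, struct frag *f)
{
	struct frag **prev = &q->fragments;

	while (*prev && (*prev)->offset < f->offset)
		prev = &(*prev)->next;
	f->next = *prev;
	*prev = f;
}

int main(void)
{
	struct frag_queue q = { NULL };
	unsigned int offs[] = { 2960, 0, 1480 };
	struct frag *f;
	size_t i;

	for (i = 0; i < sizeof(offs) / sizeof(offs[0]); i++) {
		f = calloc(1, sizeof(*f));
		if (!f)
			return 1;
		f->offset = offs[i];
		f->len = 1480;
		frag_queue_insert(&q, f);
	}
	for (f = q.fragments; f; f = f->next)
		printf("fragment at offset %u, len %u\n", f->offset, f->len);
	/* Free the chain, loosely like inet_frag_destroy() walking fp. */
	while ((f = q.fragments)) {
		q.fragments = f->next;
		free(f);
	}
	return 0;
}
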
/linux-4.1.27/drivers/staging/rtl8192u/ieee80211/
ieee80211_tx.c:256 txb->fragments[i] = dev_alloc_skb(txb_size); in ieee80211_alloc_txb()
257 if (unlikely(!txb->fragments[i])) { in ieee80211_alloc_txb()
261 memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb)); in ieee80211_alloc_txb()
265 dev_kfree_skb_any(txb->fragments[i--]); in ieee80211_alloc_txb()
768 skb_frag = txb->fragments[i]; in ieee80211_xmit()
863 memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len); in ieee80211_xmit()
870 cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE); in ieee80211_xmit()
883 ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc); in ieee80211_xmit()
886 ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]); in ieee80211_xmit()
887 ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1); in ieee80211_xmit()
ieee80211_softmac.c:2158 tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE); in ieee80211_softmac_xmit()
2178 skb_queue_tail(&ieee->skb_drv_aggQ[queue_index], txb->fragments[i]); in ieee80211_softmac_xmit()
2180 skb_queue_tail(&ieee->skb_waitQ[queue_index], txb->fragments[i]); in ieee80211_softmac_xmit()
2184 txb->fragments[i], in ieee80211_softmac_xmit()
2211 ieee->tx_pending.txb->fragments[i], in ieee80211_resume_tx()
ieee80211.h:1160 struct sk_buff *fragments[0]; member
/linux-4.1.27/net/batman-adv/
fragmentation.c:56 chain = &orig_node->fragments[i]; in batadv_frag_purge_orig()
57 spin_lock_bh(&orig_node->fragments[i].lock); in batadv_frag_purge_orig()
60 batadv_frag_clear_chain(&orig_node->fragments[i].head); in batadv_frag_purge_orig()
61 orig_node->fragments[i].size = 0; in batadv_frag_purge_orig()
64 spin_unlock_bh(&orig_node->fragments[i].lock); in batadv_frag_purge_orig()
158 chain = &orig_node->fragments[bucket]; in batadv_frag_insert_packet()
originator.c:664 INIT_HLIST_HEAD(&orig_node->fragments[i].head); in batadv_orig_node_new()
665 spin_lock_init(&orig_node->fragments[i].lock); in batadv_orig_node_new()
666 orig_node->fragments[i].size = 0; in batadv_orig_node_new()
types.h:287 struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT]; member
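
The batman-adv hits show a per-originator table of reassembly chains: fragments[BATADV_FRAG_BUFFER_COUNT], each entry carrying its own lock, list head and size, initialised in batadv_orig_node_new() and emptied bucket by bucket in batadv_frag_purge_orig(). A compact userspace analogue of that bucketed layout is sketched below; pthread mutexes stand in for the spinlocks, and deriving the bucket from the fragment sequence number is an assumption about the selection policy, not something shown in these hits:

/* build: cc frag_table.c -pthread */
#include <pthread.h>
#include <stdio.h>

#define FRAG_BUFFER_COUNT 8	/* stand-in for BATADV_FRAG_BUFFER_COUNT */

struct frag_table_entry {
	pthread_mutex_t lock;	/* spinlock in the kernel version */
	int queued;		/* stand-in for the hlist head    */
	unsigned int size;	/* bytes buffered in this chain   */
};

struct orig_node {
	struct frag_table_entry fragments[FRAG_BUFFER_COUNT];
};

static void orig_node_init(struct orig_node *orig)
{
	int i;

	for (i = 0; i < FRAG_BUFFER_COUNT; i++) {
		pthread_mutex_init(&orig->fragments[i].lock, NULL);
		orig->fragments[i].queued = 0;
		orig->fragments[i].size = 0;
	}
}

/* Drop everything buffered for one originator, one bucket at a time,
 * mirroring the lock/clear/unlock pattern in batadv_frag_purge_orig(). */
static void frag_purge_orig(struct orig_node *orig)
{
	int i;

	for (i = 0; i < FRAG_BUFFER_COUNT; i++) {
		struct frag_table_entry *chain = &orig->fragments[i];

		pthread_mutex_lock(&chain->lock);
		chain->queued = 0;
		chain->size = 0;
		pthread_mutex_unlock(&chain->lock);
	}
}

int main(void)
{
	struct orig_node orig;
	unsigned int seqno = 1234;	/* assumed bucket key */
	struct frag_table_entry *chain;

	orig_node_init(&orig);
	chain = &orig.fragments[seqno % FRAG_BUFFER_COUNT];

	pthread_mutex_lock(&chain->lock);
	chain->queued++;
	chain->size += 1400;
	pthread_mutex_unlock(&chain->lock);

	frag_purge_orig(&orig);
	printf("bucket %u after purge: %d queued, %u bytes\n",
	       seqno % FRAG_BUFFER_COUNT, chain->queued, chain->size);
	return 0;
}
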
/linux-4.1.27/net/ipv6/
reassembly.c:156 if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments) in ip6_expire_frag_queue()
163 fq->q.fragments->dev = dev; in ip6_expire_frag_queue()
164 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0); in ip6_expire_frag_queue()
295 for (next = fq->q.fragments; next != NULL; next = next->next) { in ip6_frag_queue()
327 fq->q.fragments = skb; in ip6_frag_queue()
383 struct sk_buff *fp, *head = fq->q.fragments; in ip6_frag_reasm()
408 skb_morph(head, fq->q.fragments); in ip6_frag_reasm()
409 head->next = fq->q.fragments->next; in ip6_frag_reasm()
411 consume_skb(fq->q.fragments); in ip6_frag_reasm()
412 fq->q.fragments = head; in ip6_frag_reasm()
[all …]
/linux-4.1.27/drivers/staging/rtl8192e/
rtllib_tx.c:238 txb->fragments[i] = dev_alloc_skb(txb_size); in rtllib_alloc_txb()
239 if (unlikely(!txb->fragments[i])) { in rtllib_alloc_txb()
243 memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb)); in rtllib_alloc_txb()
247 dev_kfree_skb_any(txb->fragments[i--]); in rtllib_alloc_txb()
627 memcpy(skb_put(txb->fragments[0], skb->len), skb->data, in rtllib_xmit_inter()
795 skb_frag = txb->fragments[i]; in rtllib_xmit_inter()
895 memcpy(skb_put(txb->fragments[0], skb->len), skb->data, in rtllib_xmit_inter()
902 (txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE); in rtllib_xmit_inter()
949 rtllib_tx_query_agg_cap(ieee, txb->fragments[0], in rtllib_xmit_inter()
954 txb->fragments[0]); in rtllib_xmit_inter()
rtllib_softmac.c:2489 tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb + in rtllib_softmac_xmit()
2508 txb->fragments[i]); in rtllib_softmac_xmit()
2510 kfree_skb(txb->fragments[i]); in rtllib_softmac_xmit()
2513 txb->fragments[i], in rtllib_softmac_xmit()
2538 ieee->tx_pending.txb->fragments[i], in rtllib_resume_tx()
rtllib.h:1253 struct sk_buff *fragments[0]; member
/linux-4.1.27/fs/squashfs/
fragment.c:74 u64 fragment_table_start, u64 next_table, unsigned int fragments) in squashfs_read_fragment_index_table() argument
76 unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments); in squashfs_read_fragment_index_table()
super.c:87 unsigned int fragments; in squashfs_fill_super() local
189 TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments)); in squashfs_fill_super()
276 fragments = le32_to_cpu(sblk->fragments); in squashfs_fill_super()
277 if (fragments == 0) in squashfs_fill_super()
289 le64_to_cpu(sblk->fragment_table_start), next_table, fragments); in squashfs_fill_super()
Kconfig:199 int "Number of fragments cached" if SQUASHFS_EMBEDDED
203 By default SquashFS caches the last 3 fragments read from
205 has to re-read fragments less often from disk, at the expense
squashfs_fs.h:250 __le32 fragments; member
/linux-4.1.27/tools/perf/Documentation/
perf-test.txt:11 'perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]'
21 To run just specific tests, inform test name fragments or the numbers obtained
/linux-4.1.27/net/ieee802154/6lowpan/
reassembly.c:182 for (next = fq->q.fragments; next != NULL; next = next->next) { in lowpan_frag_queue()
196 fq->q.fragments = skb; in lowpan_frag_queue()
240 struct sk_buff *fp, *head = fq->q.fragments; in lowpan_frag_reasm()
258 skb_morph(head, fq->q.fragments); in lowpan_frag_reasm()
259 head->next = fq->q.fragments->next; in lowpan_frag_reasm()
261 consume_skb(fq->q.fragments); in lowpan_frag_reasm()
262 fq->q.fragments = head; in lowpan_frag_reasm()
319 fq->q.fragments = NULL; in lowpan_frag_reasm()
/linux-4.1.27/sound/oss/
audio.c:783 info.fragments = dmap->qlen; in dma_ioctl()
787 info.fragments = 0; in dma_ioctl()
790 info.fragments = DMAbuf_space_in_queue(dev); in dma_ioctl()
794 if (tmp && info.fragments) in dma_ioctl()
798 info.fragments -= tmp; in dma_ioctl()
802 if (info.fragments < 0) in dma_ioctl()
803 info.fragments = 0; in dma_ioctl()
804 else if (info.fragments > dmap->nbufs) in dma_ioctl()
805 info.fragments = dmap->nbufs; in dma_ioctl()
808 info.bytes = info.fragments * dmap->fragment_size; in dma_ioctl()
[all …]
swarm_cs4297a.c:2192 abinfo.fragments = abinfo.bytes >> s->dma_dac.fragshift; in cs4297a_ioctl()
2196 abinfo.fragments)); in cs4297a_ioctl()
2212 abinfo.fragments = in cs4297a_ioctl()
2218 abinfo.fragments = in cs4297a_ioctl()
msnd_pinnacle.c:235 abinfo.fragments = abinfo.bytes / abinfo.fragsize; in dsp_ioctl()
246 abinfo.fragments = abinfo.bytes / abinfo.fragsize; in dsp_ioctl()
CHANGELOG:157 this change breaks some programs which assume that fragments cannot be
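
The OSS hits above all fill in the audio_buf_info reported to userspace: fragments is the number of whole free fragments (clamped to the queue size in dma_ioctl()) and bytes = fragments * fragment_size; the struct itself appears in soundcard.h further down this list. From the application side that is the SNDCTL_DSP_GETOSPACE ioctl; a minimal sketch, assuming an OSS (or OSS-emulation) device is present at /dev/dsp:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include <unistd.h>

int main(void)
{
	audio_buf_info info;
	int fd = open("/dev/dsp", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/dsp");
		return 1;
	}

	/* Ask the driver how much output buffer space is free; the kernel
	 * side fills fragments/fragstotal/fragsize/bytes as seen in the
	 * dma_ioctl() and sq_ioctl() hits above. */
	if (ioctl(fd, SNDCTL_DSP_GETOSPACE, &info) < 0) {
		perror("SNDCTL_DSP_GETOSPACE");
		close(fd);
		return 1;
	}

	printf("%d of %d fragments free, %d bytes each (%d bytes total)\n",
	       info.fragments, info.fragstotal, info.fragsize, info.bytes);

	close(fd);
	return 0;
}
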
/linux-4.1.27/net/ipv6/netfilter/
nf_conntrack_reasm.c:308 for (next = fq->q.fragments; next != NULL; next = next->next) { in nf_ct_frag6_queue()
341 fq->q.fragments = skb; in nf_ct_frag6_queue()
382 struct sk_buff *fp, *op, *head = fq->q.fragments; in nf_ct_frag6_reasm()
474 fq->q.fragments = NULL; in nf_ct_frag6_reasm()
/linux-4.1.27/include/uapi/sound/
compress_offload.h:41 __u32 fragments; member
/linux-4.1.27/include/sound/
compress_driver.h:58 u32 fragments; member
/linux-4.1.27/include/net/
inet_frag.h:54 struct sk_buff *fragments; member
/linux-4.1.27/Documentation/scsi/
sym53c500_cs.txt:11 the code fragments I shamelessly adapted for this work. Thanks also to
/linux-4.1.27/sound/core/
compress_offload.c:481 buffer_size = params->buffer.fragment_size * params->buffer.fragments; in snd_compr_allocate_buffer()
493 stream->runtime->fragments = params->buffer.fragments; in snd_compr_allocate_buffer()
503 params->buffer.fragments > INT_MAX / params->buffer.fragment_size) in snd_compress_check_input()
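
Here the compressed-audio ring buffer is simply fragment_size * fragments bytes (compress_offload.c:481), and snd_compress_check_input() rejects geometries where that product would overflow (the fragments > INT_MAX / fragment_size test at line 503). A small standalone sketch of that style of validation, with hypothetical names:

#include <limits.h>
#include <stdio.h>

/* Validate fragment geometry in the spirit of snd_compress_check_input():
 * both values non-zero and the product must not overflow an int. */
static int check_fragments(unsigned int fragment_size, unsigned int fragments,
			   unsigned int *buffer_size)
{
	if (fragment_size == 0 || fragments == 0)
		return -1;
	if (fragments > INT_MAX / fragment_size)
		return -1;
	*buffer_size = fragment_size * fragments;
	return 0;
}

int main(void)
{
	unsigned int buffer_size;

	if (check_fragments(4096, 8, &buffer_size) == 0)
		printf("ring buffer: %u bytes\n", buffer_size);	/* 32768 */
	if (check_fragments(1 << 20, 1 << 12, &buffer_size) != 0)
		printf("rejected: fragment_size * fragments overflows\n");
	return 0;
}
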
/linux-4.1.27/Documentation/networking/
udplite.txt:187 several tiny fragments, consider the following example.
203 the last packet before releasing the fragments to the IP module.
211 of 3356 bytes will be split into the following fragments:
217 The first two fragments have to be checksummed in full, of the last
ppp_generic.txt:14 ordering and combining received fragments
176 or more fragments and puts a multilink header on each fragment. It
177 decides how many fragments to use based on the length of the packet
180 fragment if it doesn't have any fragments currently queued up for it
183 scheme has the effect that more fragments are given to higher-
187 transmitted as single fragments, thus reducing the overhead of
342 numbers on received multilink fragments
429 fragments is disabled. This ioctl is only available if the
nf_conntrack-sysctl.txt:60 Maximum memory used to reassemble IPv6 fragments. When
ip-sysctl.txt:23 manually if you want to avoid locally generated fragments.
116 Maximum memory used to reassemble IP fragments. When
123 Maximum memory used to reassemble IP fragments before the kernel
125 The kernel still accepts new fragments for defragmentation.
132 maximum "disorder" which is allowed among fragments which share a
134 not unusual, but if a large number of fragments arrive from a source
136 probably indicates that one or more fragments belonging to that queue
138 is done on fragments before they are added to a reassembly queue - if
139 ipfrag_max_dist (or more) fragments have arrived from a particular IP
141 address, it's presumed that one or more fragments in the queue are
[all …]
pktgen.txt:106 pgset "frags 5" packet will consist of 5 fragments
bonding.txt:874 this criteria, as TCP rarely fragments traffic, and
/linux-4.1.27/Documentation/filesystems/
squashfs.txt:34 Tail-end packing (fragments): yes no
74 | & fragments |
249 (i.e. inode or directory) or fragment access. Because metadata and fragments
251 particular piece of metadata or fragment will retrieve other metadata/fragments
ext2.txt:70 specification for Access Control Lists (ACLs), fragments, undeletion and
141 modification time, deletion time, number of links, fragments, version
/linux-4.1.27/Documentation/sound/alsa/
OSS-Emulation.txt:128 app_name fragments fragment_size [options]
132 fragments specifies the number of fragments or zero if no specific
143 partial-frag write also partial fragments (affects playback only)
hdspm.txt:63 fragments -- 2
compress_offload.txt:80 The concept of memory ring buffer divided in a set of fragments is
ALSA-Configuration.txt:2254 String "<app_name> <fragments> <fragment_size> [<options>]"
2257 <fragments> - number of fragments or zero if auto
2267 - whole-frag write only whole fragments (optimization affecting
/linux-4.1.27/drivers/staging/i2o/
README.ioctl:205 u32 *maxfrag; /* Number of fragments */
216 The fragments _must_ be sent in order and be 8K in size. The last
246 u32 *maxfrag; /* Number of fragments */
258 The fragments _must_ be requested in order and be 8K in size. The
/linux-4.1.27/Documentation/devicetree/
overlay-notes.txt:127 /* more fragments follow */
/linux-4.1.27/fs/ext4/
mballoc.c:565 int fragments = 0; in __mb_check_buddy() local
621 fragments++; in __mb_check_buddy()
636 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); in __mb_check_buddy()
731 unsigned fragments = 0; in ext4_mb_generate_buddy() local
739 fragments++; in ext4_mb_generate_buddy()
751 grp->bb_fragments = fragments; in ext4_mb_generate_buddy()
2017 unsigned free, fragments; in ext4_mb_good_group() local
2039 fragments = grp->bb_fragments; in ext4_mb_good_group()
2040 if (fragments == 0) in ext4_mb_good_group()
2054 (free / fragments) >= ac->ac_g_ex.fe_len) in ext4_mb_good_group()
[all …]
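
In the mballoc.c hits, bb_fragments counts the free extents of a block group (ext4_mb_generate_buddy()), and ext4_mb_good_group() uses it as a cheap filter: a group is only worth scanning if its average free extent, free / fragments, is at least the goal length of the request. A toy version of that heuristic (names are illustrative):

#include <stdio.h>

/* Heuristic in the spirit of ext4_mb_good_group(): skip groups whose
 * average free extent is smaller than the allocation goal. */
static int good_group(unsigned int free_blocks, unsigned int fragments,
		      unsigned int goal_len)
{
	if (free_blocks == 0 || fragments == 0)
		return 0;	/* nothing usable in this group */
	return (free_blocks / fragments) >= goal_len;
}

int main(void)
{
	/* 1024 free blocks split into 4 extents: average 256 >= goal 128. */
	printf("group A: %s\n", good_group(1024, 4, 128) ? "scan" : "skip");
	/* Same free space shredded into 64 extents: average 16 < 128. */
	printf("group B: %s\n", good_group(1024, 64, 128) ? "scan" : "skip");
	return 0;
}
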
/linux-4.1.27/Documentation/ABI/testing/
dev-kmsg:93 fragment of a line. All following fragments are flagged with
/linux-4.1.27/sound/oss/dmasound/
dmasound_core.c:1131 info.fragments = write_sq.max_active - write_sq.count; in sq_ioctl()
1134 info.bytes = info.fragments * info.fragsize; in sq_ioctl()
/linux-4.1.27/drivers/staging/rtl8712/
ieee80211.h:651 struct sk_buff *fragments[0]; member
/linux-4.1.27/Documentation/nfc/
nfc-hci.txt:222 This function will dequeue the next pending command and send its HCP fragments
229 SHDLC I(nformation) frames payload are HCP fragments. They are aggregated to
/linux-4.1.27/Documentation/filesystems/nfs/
Exporting:25 A filesystem which supports the mapping between filehandle fragments
/linux-4.1.27/fs/jffs2/
README.Locking:45 including the linked list of node fragments (but see the notes below on
/linux-4.1.27/sound/core/oss/
pcm_oss.c:2178 info.fragments = runtime->oss.periods; in snd_pcm_oss_get_space()
2181 info.fragments = 0; in snd_pcm_oss_get_space()
2201 info.fragments = info.bytes / runtime->oss.period_bytes; in snd_pcm_oss_get_space()
2207 info.bytes, info.fragments, info.fragstotal, info.fragsize); in snd_pcm_oss_get_space()
/linux-4.1.27/include/uapi/linux/
soundcard.h:573 int fragments; /* # of available fragments (partially usend ones not counted) */ member
/linux-4.1.27/net/mac80211/
iface.c:1100 __skb_queue_purge(&sdata->fragments[i].skb_list); in ieee80211_teardown_sdata()
1751 skb_queue_head_init(&sdata->fragments[i].skb_list); in ieee80211_if_add()
ieee80211_i.h:847 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; member
rx.c:1715 entry = &sdata->fragments[sdata->fragment_next++]; in ieee80211_reassemble_add()
1750 entry = &sdata->fragments[idx]; in ieee80211_reassemble_find()
/linux-4.1.27/drivers/staging/rtl8188eu/include/
Dieee80211.h860 struct sk_buff *fragments[0]; member
/linux-4.1.27/drivers/net/ethernet/nvidia/
forcedeth.c:2190 unsigned int fragments = skb_shinfo(skb)->nr_frags; in nv_start_xmit() local
2205 for (i = 0; i < fragments; i++) { in nv_start_xmit()
2255 for (i = 0; i < fragments; i++) { in nv_start_xmit()
2337 unsigned int fragments = skb_shinfo(skb)->nr_frags; in nv_start_xmit_optimized() local
2353 for (i = 0; i < fragments; i++) { in nv_start_xmit_optimized()
2405 for (i = 0; i < fragments; i++) { in nv_start_xmit_optimized()
/linux-4.1.27/Documentation/crypto/
descore-readme.txt:263 (actually, there are some code fragments now which do require two temps,
/linux-4.1.27/Documentation/block/
biodoc.txt:395 (including non-page aligned page fragments, as specified via readv/writev)
454 of an array of <page, offset, len> fragments (similar to the way fragments
/linux-4.1.27/Documentation/
memory-barriers.txt:785 between the loads and stores in the CPU 0 and CPU 1 code fragments,