/linux-4.4.14/net/sctp/
tsnmap.c:
      9   * These functions manipulate sctp tsn mapping array.
     84  int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)   [sctp_tsnmap_check() argument]
     89  if (TSN_lte(tsn, map->cumulative_tsn_ack_point))   [sctp_tsnmap_check()]
     95  if (!TSN_lt(tsn, map->base_tsn + SCTP_TSN_MAP_SIZE))   [sctp_tsnmap_check()]
     99  gap = tsn - map->base_tsn;   [sctp_tsnmap_check()]
    110  int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,   [sctp_tsnmap_mark() argument]
    115  if (TSN_lt(tsn, map->base_tsn))   [sctp_tsnmap_mark()]
    118  gap = tsn - map->base_tsn;   [sctp_tsnmap_mark()]
    124  /* In this case the map has no gaps and the tsn we are   [sctp_tsnmap_mark()]
    125   * recording is the next expected tsn. We don't touch   [sctp_tsnmap_mark()]
    140  if (TSN_lt(map->max_tsn_seen, tsn))   [sctp_tsnmap_mark()]
    141  map->max_tsn_seen = tsn;   [sctp_tsnmap_mark()]
    205  void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn)   [sctp_tsnmap_skip() argument]
    209  if (TSN_lt(tsn, map->base_tsn))   [sctp_tsnmap_skip()]
    211  if (!TSN_lt(tsn, map->base_tsn + SCTP_TSN_MAP_SIZE))   [sctp_tsnmap_skip()]
    215  if (TSN_lt(map->max_tsn_seen, tsn))   [sctp_tsnmap_skip()]
    216  map->max_tsn_seen = tsn;   [sctp_tsnmap_skip()]
    218  gap = tsn - map->base_tsn + 1;   [sctp_tsnmap_skip()]
    315  void sctp_tsnmap_renege(struct sctp_tsnmap *map, __u32 tsn)   [sctp_tsnmap_renege() argument]
    319  if (TSN_lt(tsn, map->base_tsn))   [sctp_tsnmap_renege()]
    322  if (!TSN_lt(tsn, map->base_tsn + map->len))   [sctp_tsnmap_renege()]
    325  gap = tsn - map->base_tsn;   [sctp_tsnmap_renege()]
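The tsnmap.c hits above revolve around two ideas: TSN comparison is done with TSN_lt()/TSN_lte(), which wrap modulo 2^32, and a received TSN is located in the mapping array by its gap from map->base_tsn. The sketch below restates both in plain user-space C; it is a hedged illustration under those assumptions, not the kernel code, and every demo_* name is invented.

/*
 * Minimal sketch of mod-2^32 "serial number" comparison and of the
 * gap computation used by the tsnmap functions listed above.
 */
#include <stdint.h>
#include <stdio.h>

static int demo_tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;	/* wraps correctly at 2^32 */
}

static int demo_tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

/* Stripped-down check: duplicate, out-of-window, or in-window gap. */
static int demo_tsnmap_check(uint32_t ctsn_ack_point, uint32_t base_tsn,
			     uint32_t map_size, uint32_t tsn)
{
	uint32_t gap;

	if (demo_tsn_lte(tsn, ctsn_ack_point))
		return 1;			/* already cumulatively acked: duplicate */
	if (!demo_tsn_lt(tsn, base_tsn + map_size))
		return -1;			/* beyond what the map can record */
	gap = tsn - base_tsn;			/* bit offset into the mapping array */
	printf("tsn %u maps to gap %u\n", tsn, gap);
	return 0;
}

int main(void)
{
	/* Wrap-around: 5 is "greater than" 0xfffffffe in serial arithmetic. */
	printf("%d\n", demo_tsn_lt(0xfffffffeu, 5));	/* prints 1 */
	return demo_tsnmap_check(100, 101, 4096, 150) < 0;
}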
ulpqueue.c:
    287  __u32 tsn, ctsn;   [sctp_ulpq_store_reasm() local]
    289  tsn = event->tsn;   [sctp_ulpq_store_reasm()]
    300  ctsn = cevent->tsn;   [sctp_ulpq_store_reasm()]
    301  if (TSN_lt(ctsn, tsn)) {   [sctp_ulpq_store_reasm()]
    309  ctsn = cevent->tsn;   [sctp_ulpq_store_reasm()]
    311  if (TSN_lt(tsn, ctsn))   [sctp_ulpq_store_reasm()]
    441  ctsn = cevent->tsn;   [sctp_ulpq_retrieve_reassembled()]
    539  ctsn = cevent->tsn;   [sctp_ulpq_retrieve_partial()]
    603  /* Do not even bother unless this is the next tsn to   [sctp_ulpq_reasm()]
    606  ctsn = event->tsn;   [sctp_ulpq_reasm()]
    637  ctsn = cevent->tsn;   [sctp_ulpq_retrieve_first()]
    698  __u32 tsn;   [sctp_ulpq_reasm_flushtsn() local]
    705  tsn = event->tsn;   [sctp_ulpq_reasm_flushtsn()]
    712  if (TSN_lte(tsn, fwd_tsn)) {   [sctp_ulpq_reasm_flushtsn()]
    881   * ordered by forward tsn skipping their dependencies.
    977  __u32 tsn, last_tsn;   [sctp_ulpq_renege_list() local]
    986  tsn = event->tsn;   [sctp_ulpq_renege_list()]
    989  if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))   [sctp_ulpq_renege_list()]
   1003  last_tsn = sctp_skb2event(last)->tsn;   [sctp_ulpq_renege_list()]
   1005  last_tsn = tsn;   [sctp_ulpq_renege_list()]
   1010  while (TSN_lte(tsn, last_tsn)) {   [sctp_ulpq_renege_list()]
   1011  sctp_tsnmap_renege(tsnmap, tsn);   [sctp_ulpq_renege_list()]
   1012  tsn++;   [sctp_ulpq_renege_list()]
   1057  ctsn = sctp_skb2event(skb)->tsn;   [sctp_ulpq_partial_delivery()]
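Most of the ulpqueue.c hits come from keeping the reassembly queue sorted by TSN: sctp_ulpq_store_reasm() walks the queue comparing the new event->tsn against each queued cevent->tsn. A minimal sketch of that ordering rule follows, using an invented frag list rather than the kernel's sk_buff queues; the names frag and frag_insert_sorted are hypothetical.

/* Keep fragments in ascending TSN order, inserting before the first
 * queued fragment whose TSN is not smaller than the new one. */
#include <stdint.h>
#include <stdio.h>

struct frag {
	uint32_t tsn;
	struct frag *next;
};

static int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static void frag_insert_sorted(struct frag **head, struct frag *new)
{
	struct frag **pos = head;

	while (*pos && tsn_lt((*pos)->tsn, new->tsn))
		pos = &(*pos)->next;
	new->next = *pos;
	*pos = new;
}

int main(void)
{
	struct frag a = { .tsn = 10 }, b = { .tsn = 12 }, c = { .tsn = 11 };
	struct frag *head = NULL;

	frag_insert_sorted(&head, &a);
	frag_insert_sorted(&head, &b);
	frag_insert_sorted(&head, &c);
	for (struct frag *f = head; f; f = f->next)
		printf("%u ", f->tsn);	/* prints: 10 11 12 */
	printf("\n");
	return 0;
}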
outqueue.c:
     55  static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
    162  static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)   [sctp_cacc_skip_3_2() argument]
    165  TSN_lt(tsn, primary->cacc.next_tsn_at_change))   [sctp_cacc_skip_3_2()]
    184  sctp_cacc_skip(struct sctp_transport *primary, struct sctp_transport *transport, int count_of_newacks, __u32 tsn)   [sctp_cacc_skip() argument]
    187  __u32 tsn)   [sctp_cacc_skip()]
    191  sctp_cacc_skip_3_2(primary, tsn)))   [sctp_cacc_skip()]
    360  ntsn = ntohl(nchunk->subh.data_hdr->tsn);   [sctp_insert_list()]
    364  ltsn = ntohl(lchunk->subh.data_hdr->tsn);   [list_for_each()]
   1005  pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "   [sctp_outq_flush()]
   1009  "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),   [sctp_outq_flush()]
   1023  pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",   [sctp_outq_flush()]
   1024  __func__, ntohl(chunk->subh.data_hdr->tsn),   [sctp_outq_flush()]
   1132  __u32 sack_ctsn, ctsn, tsn;   [sctp_outq_sack() local]
   1242  tsn = ntohl(tchunk->subh.data_hdr->tsn);
   1243  if (TSN_lte(tsn, ctsn)) {
   1267  pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
   1268  pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
   1309  __u32 tsn;   [sctp_check_transmitted() local]
   1342  tsn = ntohl(tchunk->subh.data_hdr->tsn);   [sctp_check_transmitted()]
   1343  if (sctp_acked(sack, tsn)) {   [sctp_check_transmitted()]
   1379  if (TSN_lt(*highest_new_tsn_in_sack, tsn))   [sctp_check_transmitted()]
   1380  *highest_new_tsn_in_sack = tsn;   [sctp_check_transmitted()]
   1387  if (TSN_lte(tsn, sack_ctsn)) {   [sctp_check_transmitted()]
   1445  __func__, tsn);   [sctp_check_transmitted()]
   1581  __u32 tsn;   [sctp_mark_missing() local]
   1588  tsn = ntohl(chunk->subh.data_hdr->tsn);   [list_for_each_entry()]
   1599  TSN_lt(tsn, highest_new_tsn_in_sack)) {   [list_for_each_entry()]
   1606  count_of_newacks, tsn)) {   [list_for_each_entry()]
   1609  pr_debug("%s: tsn:0x%x missing counter:%d\n",   [list_for_each_entry()]
   1610  __func__, tsn, chunk->tsn_missing_report);   [list_for_each_entry()]
   1638  static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)   [sctp_acked() argument]
   1645  if (TSN_lte(tsn, ctsn))   [sctp_acked()]
   1661  gap = tsn - ctsn;   [sctp_acked()]
   1693  __u32 tsn;   [sctp_generate_fwdtsn() local]
   1733  tsn = ntohl(chunk->subh.data_hdr->tsn);   [sctp_generate_fwdtsn()]
   1738  if (TSN_lte(tsn, ctsn)) {   [sctp_generate_fwdtsn()]
   1742  if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {   [sctp_generate_fwdtsn()]
   1743  asoc->adv_peer_ack_point = tsn;   [sctp_generate_fwdtsn()]
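The outqueue.c hits center on SACK processing: sctp_check_transmitted() asks sctp_acked() (line 1638 above) whether each transmitted TSN is covered either by the cumulative TSN ack or by a gap-ack block expressed as offsets from the ctsn. The following is a hedged, self-contained restatement of that coverage test; the struct layout and the demo_acked name are illustrative, not the on-wire sctp_sackhdr.

#include <stdint.h>
#include <stdio.h>

struct gap_ack_block {
	uint16_t start;	/* offset of first TSN in the block, relative to ctsn */
	uint16_t end;	/* offset of last TSN in the block, relative to ctsn */
};

static int tsn_lte(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

static int demo_acked(uint32_t ctsn, const struct gap_ack_block *blocks,
		      unsigned int nblocks, uint32_t tsn)
{
	uint32_t gap;
	unsigned int i;

	if (tsn_lte(tsn, ctsn))
		return 1;		/* covered by the cumulative ack */

	gap = tsn - ctsn;		/* offset the gap blocks are expressed in */
	for (i = 0; i < nblocks; i++)
		if (blocks[i].start <= gap && gap <= blocks[i].end)
			return 1;	/* covered by a gap-ack block */
	return 0;
}

int main(void)
{
	struct gap_ack_block blocks[] = { { 2, 3 }, { 6, 6 } };

	/* ctsn = 100: TSNs 102-103 and 106 are gap-acked, 104 is not. */
	printf("%d %d %d\n",
	       demo_acked(100, blocks, 2, 99),
	       demo_acked(100, blocks, 2, 103),
	       demo_acked(100, blocks, 2, 104));	/* prints: 1 1 0 */
	return 0;
}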
ulpevent.c:
    671  ntohl(chunk->subh.data_hdr->tsn),   [sctp_ulpevent_make_rcvmsg()]
    711  event->tsn = ntohl(chunk->subh.data_hdr->tsn);   [sctp_ulpevent_make_rcvmsg()]
    877  sinfo.sinfo_tsn = event->tsn;   [sctp_ulpevent_read_sndrcvinfo()]
    905  rinfo.rcv_tsn = event->tsn;   [sctp_ulpevent_read_rcvinfo()]
sm_statefuns.c:
   2659  /* If Cumulative TSN Ack beyond the max tsn currently   [sctp_sf_do_9_2_shutdown()]
   2744  /* If Cumulative TSN Ack beyond the max tsn currently   [sctp_sf_do_9_2_shut_ctsn()]
   3198  /* If Cumulative TSN Ack beyond the max tsn currently   [sctp_sf_eat_sack_6_2()]
   3843  __u32 tsn;   [sctp_sf_eat_fwd_tsn() local]
   3862  tsn = ntohl(fwdtsn_hdr->new_cum_tsn);   [sctp_sf_eat_fwd_tsn()]
   3863  pr_debug("%s: TSN 0x%x\n", __func__, tsn);   [sctp_sf_eat_fwd_tsn()]
   3868  if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)   [sctp_sf_eat_fwd_tsn()]
   3877  sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
   3911  __u32 tsn;   [sctp_sf_eat_fwd_tsn_fast() local]
   3930  tsn = ntohl(fwdtsn_hdr->new_cum_tsn);   [sctp_sf_eat_fwd_tsn_fast()]
   3931  pr_debug("%s: TSN 0x%x\n", __func__, tsn);   [sctp_sf_eat_fwd_tsn_fast()]
   3936  if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)   [sctp_sf_eat_fwd_tsn_fast()]
   3945  sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
   4503   * cumulative tsn ack to a point beyond the max tsn currently sent.
   4516  static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:";   [sctp_sf_violation_ctsn()]
   6104  __u32 tsn;   [sctp_eat_data() local]
   6115  tsn = ntohl(data_hdr->tsn);   [sctp_eat_data()]
   6116  pr_debug("%s: TSN 0x%x\n", __func__, tsn);   [sctp_eat_data()]
   6140  SCTP_U32(tsn));   [sctp_eat_data()]
   6144  tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);   [sctp_eat_data()]
   6154  sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));   [sctp_eat_data()]
   6192  (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {   [sctp_eat_data()]
   6193  pr_debug("%s: reneging for tsn:%u\n", __func__, tsn);   [sctp_eat_data()]
   6196  pr_debug("%s: discard tsn:%u len:%zu, rwnd:%d\n",   [sctp_eat_data()]
   6197  __func__, tsn, datalen, asoc->rwnd);   [sctp_eat_data()]
   6212  (sctp_tsnmap_get_ctsn(map) + 1) == tsn) {   [sctp_eat_data()]
   6213  pr_debug("%s: under pressure, reneging for tsn:%u\n",   [sctp_eat_data()]
   6214  __func__, tsn);   [sctp_eat_data()]
   6228  err = sctp_make_abort_no_data(asoc, chunk, tsn);   [sctp_eat_data()]
   6272  /* Mark tsn as received even though we drop it */   [sctp_eat_data()]
   6273  sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));   [sctp_eat_data()]
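Among the sm_statefuns.c hits, sctp_sf_eat_fwd_tsn() and its _fast variant validate new_cum_tsn from a FORWARD TSN chunk against the peer's tsnmap and then let the cumulative ack point jump ahead over abandoned TSNs. The sketch below shows only the "advance, never retreat" step (compare sctp_tsnmap_skip() in tsnmap.c above); it is a hedged sketch and the demo_* names are invented.

#include <stdint.h>
#include <stdio.h>

static int demo_tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Move the cumulative TSN ack point forward to new_cum_tsn, never backward. */
static void demo_fwd_tsn(uint32_t *ctsn_ack_point, uint32_t new_cum_tsn)
{
	if (demo_tsn_lt(*ctsn_ack_point, new_cum_tsn))
		*ctsn_ack_point = new_cum_tsn;
}

int main(void)
{
	uint32_t ctsn = 1000;

	demo_fwd_tsn(&ctsn, 1005);	/* skip forward over abandoned TSNs */
	demo_fwd_tsn(&ctsn, 990);	/* stale FORWARD TSN: ignored */
	printf("%u\n", ctsn);		/* prints: 1005 */
	return 0;
}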
associola.c:
    913  sctp_assoc_lookup_tsn(struct sctp_association *asoc, __u32 tsn)   [sctp_assoc_lookup_tsn() argument]
    914  __u32 tsn)   [sctp_assoc_lookup_tsn()]
    920  __be32 key = htonl(tsn);   [sctp_assoc_lookup_tsn()]
    944  if (key == chunk->subh.data_hdr->tsn) {   [sctp_assoc_lookup_tsn()]
    958  if (key == chunk->subh.data_hdr->tsn) {   [sctp_assoc_lookup_tsn()]
sm_make_chunk.c:
    725  dp.tsn = 0;   [sctp_make_datafrag_empty()]
    968  sctp_make_abort_no_data(const struct sctp_association *asoc, const struct sctp_chunk *chunk, __u32 tsn)   [sctp_make_abort_no_data() argument]
    970  const struct sctp_chunk *chunk, __u32 tsn)   [sctp_make_abort_no_data()]
    976  + sizeof(tsn));   [sctp_make_abort_no_data()]
    981  /* Put the tsn back into network byte order. */   [sctp_make_abort_no_data()]
    982  payload = htonl(tsn);   [sctp_make_abort_no_data()]
   1562  chunk->subh.data_hdr->tsn =   [sctp_chunk_assign_tsn()]
output.c:
    502  chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,   [sctp_packet_transmit()]
/linux-4.4.14/include/net/sctp/
tsnmap.h:
    110  int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
    113  int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
    117  void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
    137  /* Return pointer to duplicate tsn array as needed by SACK. */   [sctp_tsnmap_get_dups()]
    160  static inline void sctp_tsnmap_mark_dup(struct sctp_tsnmap *map, __u32 tsn)   [sctp_tsnmap_mark_dup() argument]
    163  map->dup_tsns[map->num_dup_tsns++] = htonl(tsn);   [sctp_tsnmap_mark_dup()]
    167  void sctp_tsnmap_renege(struct sctp_tsnmap *, __u32 tsn);
ulpevent.h:
     55  __u32 tsn;   [member in struct sctp_ulpevent]
sm.h:
    220  __u32 tsn);
structs.h:
   1031   * CTSN, or cumulative tsn ack point.
/linux-4.4.14/net/nfc/
digital_technology.c:
    145  u8 tsn;   [member in struct digital_sensf_req]
    831  sensf_req->tsn = 0;   [digital_in_send_sensf_req()]
/linux-4.4.14/include/linux/
sctp.h:
    223  __be32 tsn;   [member in struct sctp_datahdr]
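The tsn member of struct sctp_datahdr is declared __be32 because the DATA chunk carries it in network byte order, which is why the consumers listed above always go through ntohl()/htonl(). A tiny sketch with a hypothetical struct (demo_datahdr is not the kernel layout):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct demo_datahdr {
	uint32_t tsn;		/* stored in network byte order */
	uint16_t stream;
	uint16_t ssn;
	uint32_t ppid;
};

int main(void)
{
	struct demo_datahdr dh = { .tsn = htonl(0x12345678u) };

	printf("tsn on host: 0x%x\n", ntohl(dh.tsn));	/* 0x12345678 */
	return 0;
}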
/linux-4.4.14/drivers/nfc/
pn533.c:
    219  u8 tsn;   [member in struct pn533_cmd_poll_initdata::__anon8738]
    261  .tsn = 0x03,
    274  .tsn = 0x03,