net/sctp/chunk.c


DEFINITIONS

This source file includes the following definitions.
  1. sctp_datamsg_init
  2. sctp_datamsg_new
  3. sctp_datamsg_free
  4. sctp_datamsg_destroy
  5. sctp_datamsg_hold
  6. sctp_datamsg_put
  7. sctp_datamsg_assign
  8. sctp_datamsg_from_user
  9. sctp_chunk_abandoned
  10. sctp_chunk_fail

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /* SCTP kernel implementation
   3  * (C) Copyright IBM Corp. 2003, 2004
   4  *
   5  * This file is part of the SCTP kernel implementation
   6  *
   7  * This file contains the code relating to the chunk abstraction.
   8  *
   9  * Please send any bug reports or fixes you make to the
  10  * email address(es):
  11  *    lksctp developers <linux-sctp@vger.kernel.org>
  12  *
  13  * Written or modified by:
  14  *    Jon Grimm             <jgrimm@us.ibm.com>
  15  *    Sridhar Samudrala     <sri@us.ibm.com>
  16  */
  17 
  18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19 
  20 #include <linux/types.h>
  21 #include <linux/kernel.h>
  22 #include <linux/net.h>
  23 #include <linux/inet.h>
  24 #include <linux/skbuff.h>
  25 #include <linux/slab.h>
  26 #include <net/sock.h>
  27 #include <net/sctp/sctp.h>
  28 #include <net/sctp/sm.h>
  29 
  30 /* This file is mostly in anticipation of future work; for now it only
  31  * implements fragment tracking for an outbound message.
  32  */
  33 
  34 /* Initialize datamsg from memory. */
  35 static void sctp_datamsg_init(struct sctp_datamsg *msg)
  36 {
  37         refcount_set(&msg->refcnt, 1);
  38         msg->send_failed = 0;
  39         msg->send_error = 0;
  40         msg->can_delay = 1;
  41         msg->abandoned = 0;
  42         msg->expires_at = 0;
  43         INIT_LIST_HEAD(&msg->chunks);
  44 }
  45 
  46 /* Allocate and initialize datamsg. */
  47 static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
  48 {
  49         struct sctp_datamsg *msg;
  50         msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
  51         if (msg) {
  52                 sctp_datamsg_init(msg);
  53                 SCTP_DBG_OBJCNT_INC(datamsg);
  54         }
  55         return msg;
  56 }
  57 
  58 void sctp_datamsg_free(struct sctp_datamsg *msg)
  59 {
  60         struct sctp_chunk *chunk;
  61 
  62         /* This doesn't have to be a _safe variant because
  63          * sctp_chunk_free() only drops the refs.
  64          */
  65         list_for_each_entry(chunk, &msg->chunks, frag_list)
  66                 sctp_chunk_free(chunk);
  67 
  68         sctp_datamsg_put(msg);
  69 }
  70 
  71 /* Final destruction of datamsg memory. */
  72 static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
  73 {
  74         struct sctp_association *asoc = NULL;
  75         struct list_head *pos, *temp;
  76         struct sctp_chunk *chunk;
  77         struct sctp_ulpevent *ev;
  78         int error = 0, notify;
  79 
  80         /* If we failed, we may need to notify. */
  81         notify = msg->send_failed ? -1 : 0;
  82 
  83         /* Release all references. */
  84         list_for_each_safe(pos, temp, &msg->chunks) {
  85                 list_del_init(pos);
  86                 chunk = list_entry(pos, struct sctp_chunk, frag_list);
  87                 /* Check whether we _really_ need to notify. */
  88                 if (notify < 0) {
  89                         asoc = chunk->asoc;
  90                         if (msg->send_error)
  91                                 error = msg->send_error;
  92                         else
  93                                 error = asoc->outqueue.error;
  94 
  95                         notify = sctp_ulpevent_type_enabled(asoc->subscribe,
  96                                                             SCTP_SEND_FAILED);
  97                 }
  98 
  99                 /* Generate a SEND FAILED event only if enabled. */
 100                 if (notify > 0) {
 101                         int sent;
 102                         if (chunk->has_tsn)
 103                                 sent = SCTP_DATA_SENT;
 104                         else
 105                                 sent = SCTP_DATA_UNSENT;
 106 
 107                         ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent,
 108                                                             error, GFP_ATOMIC);
 109                         if (ev)
 110                                 asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
 111                 }
 112 
 113                 sctp_chunk_put(chunk);
 114         }
 115 
 116         SCTP_DBG_OBJCNT_DEC(datamsg);
 117         kfree(msg);
 118 }
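
The notify variable above is a small tri-state: it starts at -1 ("not decided yet") when the message failed, is resolved against the socket's event subscriptions when the first chunk is examined, and then stays 0 or 1 for the rest of the list walk. Below is a minimal userspace sketch of that idiom; subscribed_to_send_failed() is a hypothetical stand-in for the real sctp_ulpevent_type_enabled() check.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the subscription check; the kernel code above
 * consults sctp_ulpevent_type_enabled() on the chunk's association instead.
 */
static bool subscribed_to_send_failed(void)
{
        return true;
}

int main(void)
{
        bool send_failed = true;
        /* -1: not decided yet, 0: no events, 1: emit one event per chunk */
        int notify = send_failed ? -1 : 0;
        int chunk_ids[] = { 1, 2, 3 };
        unsigned int i;

        for (i = 0; i < sizeof(chunk_ids) / sizeof(chunk_ids[0]); i++) {
                if (notify < 0)         /* resolve the subscription only once */
                        notify = subscribed_to_send_failed() ? 1 : 0;
                if (notify > 0)
                        printf("SEND FAILED event for chunk %d\n", chunk_ids[i]);
        }
        return 0;
}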
 119 
 120 /* Hold a reference. */
 121 static void sctp_datamsg_hold(struct sctp_datamsg *msg)
 122 {
 123         refcount_inc(&msg->refcnt);
 124 }
 125 
 126 /* Release a reference. */
 127 void sctp_datamsg_put(struct sctp_datamsg *msg)
 128 {
 129         if (refcount_dec_and_test(&msg->refcnt))
 130                 sctp_datamsg_destroy(msg);
 131 }
 132 
 133 /* Assign a chunk to this datamsg. */
 134 static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk)
 135 {
 136         sctp_datamsg_hold(msg);
 137         chunk->msg = msg;
 138 }
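
Taken together, sctp_datamsg_init(), sctp_datamsg_hold(), sctp_datamsg_put() and sctp_datamsg_assign() implement a plain reference-count lifecycle: the creator starts with one reference, every chunk attached to the message takes another, and the message is destroyed when the last reference is dropped. A minimal userspace sketch of the same pattern using C11 atomics follows; the demo_msg_* names are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: a refcounted "message" destroyed when the last holder
 * drops its reference, mirroring refcount_set()/refcount_inc()/
 * refcount_dec_and_test() in the code above.
 */
struct demo_msg {
        atomic_int refcnt;
};

static struct demo_msg *demo_msg_new(void)
{
        struct demo_msg *m = malloc(sizeof(*m));
        if (m)
                atomic_init(&m->refcnt, 1);     /* creator's reference */
        return m;
}

static void demo_msg_hold(struct demo_msg *m)
{
        atomic_fetch_add(&m->refcnt, 1);        /* e.g. one per attached chunk */
}

static void demo_msg_put(struct demo_msg *m)
{
        if (atomic_fetch_sub(&m->refcnt, 1) == 1) {
                printf("last reference dropped, destroying\n");
                free(m);
        }
}

int main(void)
{
        struct demo_msg *m = demo_msg_new();

        demo_msg_hold(m);       /* a "chunk" takes a reference */
        demo_msg_put(m);        /* the chunk goes away */
        demo_msg_put(m);        /* the creator's reference: freed here */
        return 0;
}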
 139 
 140 
 141 /* A data chunk can have a maximum payload of (2^16 - 20).  Break
 142  * down any such message into smaller chunks.  Opportunistically, fragment
 143  * the chunks down to the current MTU constraints.  We may get refragmented
 144  * later if the PMTU changes, but it is _much better_ to fragment immediately
 145  * with a reasonable guess than always doing our fragmentation on the
 146  * soft-interrupt.
 147  */
 148 struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 149                                             struct sctp_sndrcvinfo *sinfo,
 150                                             struct iov_iter *from)
 151 {
 152         size_t len, first_len, max_data, remaining;
 153         size_t msg_len = iov_iter_count(from);
 154         struct sctp_shared_key *shkey = NULL;
 155         struct list_head *pos, *temp;
 156         struct sctp_chunk *chunk;
 157         struct sctp_datamsg *msg;
 158         int err;
 159 
 160         msg = sctp_datamsg_new(GFP_KERNEL);
 161         if (!msg)
 162                 return ERR_PTR(-ENOMEM);
 163 
 164         /* Note: Calculate this outside of the loop, so that all fragments
 165          * have the same expiration.
 166          */
 167         if (asoc->peer.prsctp_capable && sinfo->sinfo_timetolive &&
 168             (SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags) ||
 169              !SCTP_PR_POLICY(sinfo->sinfo_flags)))
 170                 msg->expires_at = jiffies +
 171                                   msecs_to_jiffies(sinfo->sinfo_timetolive);
 172 
 173         /* This is the biggest possible DATA chunk that can fit into
 174          * the packet
 175          */
 176         max_data = asoc->frag_point;
 177         if (unlikely(!max_data)) {
 178                 max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),
 179                                                sctp_datachk_len(&asoc->stream));
 180                 pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%zu)",
 181                                     __func__, asoc, max_data);
 182         }
 183 
  184         /* If the peer requested that we authenticate DATA chunks
 185          * we need to account for bundling of the AUTH chunks along with
 186          * DATA.
 187          */
 188         if (sctp_auth_send_cid(SCTP_CID_DATA, asoc)) {
 189                 struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc);
 190 
 191                 if (hmac_desc)
 192                         max_data -= SCTP_PAD4(sizeof(struct sctp_auth_chunk) +
 193                                               hmac_desc->hmac_len);
 194 
 195                 if (sinfo->sinfo_tsn &&
 196                     sinfo->sinfo_ssn != asoc->active_key_id) {
 197                         shkey = sctp_auth_get_shkey(asoc, sinfo->sinfo_ssn);
 198                         if (!shkey) {
 199                                 err = -EINVAL;
 200                                 goto errout;
 201                         }
 202                 } else {
 203                         shkey = asoc->shkey;
 204                 }
 205         }
 206 
 207         /* Set first_len and then account for possible bundles on first frag */
 208         first_len = max_data;
 209 
 210         /* Check to see if we have a pending SACK and try to let it be bundled
 211          * with this message.  Do this if we don't have any data queued already.
 212          * To check that, look at out_qlen and retransmit list.
 213          * NOTE: we will not reduce to account for SACK, if the message would
 214          * not have been fragmented.
 215          */
 216         if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) &&
 217             asoc->outqueue.out_qlen == 0 &&
 218             list_empty(&asoc->outqueue.retransmit) &&
 219             msg_len > max_data)
 220                 first_len -= SCTP_PAD4(sizeof(struct sctp_sack_chunk));
 221 
 222         /* Encourage Cookie-ECHO bundling. */
 223         if (asoc->state < SCTP_STATE_COOKIE_ECHOED)
 224                 first_len -= SCTP_ARBITRARY_COOKIE_ECHO_LEN;
 225 
 226         /* Account for a different sized first fragment */
 227         if (msg_len >= first_len) {
 228                 msg->can_delay = 0;
 229                 if (msg_len > first_len)
 230                         SCTP_INC_STATS(sock_net(asoc->base.sk),
 231                                        SCTP_MIB_FRAGUSRMSGS);
 232         } else {
 233                 /* Which may be the only one... */
 234                 first_len = msg_len;
 235         }
 236 
 237         /* Create chunks for all DATA chunks. */
 238         for (remaining = msg_len; remaining; remaining -= len) {
 239                 u8 frag = SCTP_DATA_MIDDLE_FRAG;
 240 
 241                 if (remaining == msg_len) {
 242                         /* First frag, which may also be the last */
 243                         frag |= SCTP_DATA_FIRST_FRAG;
 244                         len = first_len;
 245                 } else {
 246                         /* Middle frags */
 247                         len = max_data;
 248                 }
 249 
 250                 if (len >= remaining) {
 251                         /* Last frag, which may also be the first */
 252                         len = remaining;
 253                         frag |= SCTP_DATA_LAST_FRAG;
 254 
 255                         /* The application requests to set the I-bit of the
 256                          * last DATA chunk of a user message when providing
 257                          * the user message to the SCTP implementation.
 258                          */
 259                         if ((sinfo->sinfo_flags & SCTP_EOF) ||
 260                             (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
 261                                 frag |= SCTP_DATA_SACK_IMM;
 262                 }
 263 
 264                 chunk = asoc->stream.si->make_datafrag(asoc, sinfo, len, frag,
 265                                                        GFP_KERNEL);
 266                 if (!chunk) {
 267                         err = -ENOMEM;
 268                         goto errout;
 269                 }
 270 
 271                 err = sctp_user_addto_chunk(chunk, len, from);
 272                 if (err < 0)
 273                         goto errout_chunk_free;
 274 
 275                 chunk->shkey = shkey;
 276 
 277                 /* Put the chunk->skb back into the form expected by send.  */
 278                 __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr -
 279                                        chunk->skb->data);
 280 
 281                 sctp_datamsg_assign(msg, chunk);
 282                 list_add_tail(&chunk->frag_list, &msg->chunks);
 283         }
 284 
 285         return msg;
 286 
 287 errout_chunk_free:
 288         sctp_chunk_free(chunk);
 289 
 290 errout:
 291         list_for_each_safe(pos, temp, &msg->chunks) {
 292                 list_del_init(pos);
 293                 chunk = list_entry(pos, struct sctp_chunk, frag_list);
 294                 sctp_chunk_free(chunk);
 295         }
 296         sctp_datamsg_put(msg);
 297 
 298         return ERR_PTR(err);
 299 }
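
The sizing logic above boils down to: every fragment is capped at max_data (the association's frag_point, reduced by the AUTH overhead when DATA must be authenticated), and the first fragment may be capped further to leave room for a bundled SACK or COOKIE ECHO. Below is a rough standalone sketch of just that arithmetic; the byte counts (1200-byte frag_point, 40 bytes of AUTH overhead, a 16-byte SACK reservation) and the PAD4 macro are made up for illustration.

#include <stdio.h>

#define PAD4(x)        (((x) + 3) & ~3)         /* same rounding as SCTP_PAD4() */

/* Illustrative only: split a 3000-byte message the way the loop above does,
 * using made-up overheads; the kernel derives these from the association.
 */
int main(void)
{
        size_t msg_len = 3000;
        size_t max_data = 1200 - PAD4(40);      /* AUTH bundled with every DATA */
        size_t first_len = max_data - PAD4(16); /* SACK bundled with the first */
        size_t remaining, len;
        int i = 0;

        for (remaining = msg_len; remaining; remaining -= len) {
                len = (remaining == msg_len) ? first_len : max_data;
                if (len >= remaining)
                        len = remaining;        /* last (possibly only) fragment */
                printf("fragment %d: %zu bytes\n", ++i, len);
        }
        return 0;
}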
 300 
 301 /* Check whether this message has expired. */
 302 int sctp_chunk_abandoned(struct sctp_chunk *chunk)
 303 {
 304         if (!chunk->asoc->peer.prsctp_capable)
 305                 return 0;
 306 
 307         if (chunk->msg->abandoned)
 308                 return 1;
 309 
 310         if (!chunk->has_tsn &&
 311             !(chunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG))
 312                 return 0;
 313 
 314         if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
 315             time_after(jiffies, chunk->msg->expires_at)) {
 316                 struct sctp_stream_out *streamout =
 317                         SCTP_SO(&chunk->asoc->stream,
 318                                 chunk->sinfo.sinfo_stream);
 319 
 320                 if (chunk->sent_count) {
 321                         chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
 322                         streamout->ext->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
 323                 } else {
 324                         chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
 325                         streamout->ext->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
 326                 }
 327                 chunk->msg->abandoned = 1;
 328                 return 1;
 329         } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
 330                    chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
 331                 struct sctp_stream_out *streamout =
 332                         SCTP_SO(&chunk->asoc->stream,
 333                                 chunk->sinfo.sinfo_stream);
 334 
 335                 chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
 336                 streamout->ext->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
 337                 chunk->msg->abandoned = 1;
 338                 return 1;
 339         } else if (!SCTP_PR_POLICY(chunk->sinfo.sinfo_flags) &&
 340                    chunk->msg->expires_at &&
 341                    time_after(jiffies, chunk->msg->expires_at)) {
 342                 chunk->msg->abandoned = 1;
 343                 return 1;
 344         }
 345         /* PRIO policy is processed by sendmsg, not here */
 346 
 347         return 0;
 348 }
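
sctp_chunk_abandoned() above implements two of the PR-SCTP abandonment policies: timed reliability (TTL) abandons a message once its expiry time has passed, and limited retransmission (RTX) abandons it once the chunk has been sent more times than the configured limit (carried in sinfo_timetolive); the priority policy is handled at sendmsg() time rather than here. A condensed userspace sketch of that decision follows, with illustrative names (chunk_abandoned, pr_policy) and without the per-association and per-stream accounting.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

enum pr_policy { PR_NONE, PR_TTL, PR_RTX };

/* Illustrative only: the TTL and RTX branches of the decision above,
 * stripped of the abandoned_sent/abandoned_unsent statistics.
 */
static bool chunk_abandoned(enum pr_policy policy, time_t now,
                            time_t expires_at, int sent_count, int rtx_limit)
{
        if (policy == PR_TTL && now > expires_at)
                return true;            /* lifetime exceeded */
        if (policy == PR_RTX && sent_count > rtx_limit)
                return true;            /* retransmitted too often */
        return false;                   /* other policies handled elsewhere */
}

int main(void)
{
        time_t now = time(NULL);

        printf("TTL expired:  %d\n", chunk_abandoned(PR_TTL, now, now - 1, 1, 0));
        printf("RTX exceeded: %d\n", chunk_abandoned(PR_RTX, now, 0, 5, 3));
        return 0;
}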
 349 
 350 /* This chunk (and consequently entire message) has failed in its sending. */
 351 void sctp_chunk_fail(struct sctp_chunk *chunk, int error)
 352 {
 353         chunk->msg->send_failed = 1;
 354         chunk->msg->send_error = error;
 355 }
