root/net/caif/cfserl.c


DEFINITIONS

This source file includes the following definitions:
  1. cfserl_create
  2. cfserl_receive
  3. cfserl_transmit
  4. cfserl_ctrlcmd

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:      Sjur Brendeland
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfserl.h>

#define container_obj(layr) ((struct cfserl *) layr)

#define CFSERL_STX 0x02
#define SERIAL_MINIUM_PACKET_SIZE 4
#define SERIAL_MAX_FRAMESIZE 4096

struct cfserl {
	struct cflayer layer;
	/* Partially received frame, kept between calls to cfserl_receive() */
	struct cfpkt *incomplete_frm;
	/* Protects parallel processing of incoming packets */
	spinlock_t sync;
	bool usestx;
};

static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid);

struct cflayer *cfserl_create(int instance, bool use_stx)
{
	struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);

	if (!this)
		return NULL;
	caif_assert(offsetof(struct cfserl, layer) == 0);
	this->layer.receive = cfserl_receive;
	this->layer.transmit = cfserl_transmit;
	this->layer.ctrlcmd = cfserl_ctrlcmd;
	this->usestx = use_stx;
	spin_lock_init(&this->sync);
	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
	return &this->layer;
}
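
/*
 * Illustrative sketch, not part of the original file: how a link driver
 * might create this serial framing layer and stack it between a physical
 * layer below and a CAIF layer above.  The "up" and "phy" layers are
 * hypothetical placeholders; only cfserl_create() and the ->up/->dn
 * wiring come from this file and <net/caif/caif_layer.h>.
 */
#if 0	/* example only, never compiled */
static int example_stack_serial_layer(struct cflayer *up, struct cflayer *phy)
{
	/* instance 0, STX framing enabled */
	struct cflayer *ser = cfserl_create(0, true);

	if (!ser)
		return -ENOMEM;
	ser->up = up;	/* parsed frames are delivered via up->receive() */
	ser->dn = phy;	/* framed packets leave via phy->transmit() */
	up->dn = ser;
	phy->up = ser;
	return 0;
}
#endif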

static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(l);
	u16 pkt_len;
	struct cfpkt *pkt = NULL;
	struct cfpkt *tail_pkt = NULL;
	u8 tmp8;
	u16 tmp;
	u8 stx = CFSERL_STX;
	int ret;
	u16 expectlen = 0;

	caif_assert(newpkt != NULL);
	spin_lock(&layr->sync);

	if (layr->incomplete_frm != NULL) {
		layr->incomplete_frm =
		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
		pkt = layr->incomplete_frm;
		if (pkt == NULL) {
			spin_unlock(&layr->sync);
			return -ENOMEM;
		}
	} else {
		pkt = newpkt;
	}
	layr->incomplete_frm = NULL;

	do {
		/* Search for STX at start of pkt if STX is used */
		if (layr->usestx) {
			cfpkt_extr_head(pkt, &tmp8, 1);
			if (tmp8 != CFSERL_STX) {
				while (cfpkt_more(pkt)
				       && tmp8 != CFSERL_STX) {
					cfpkt_extr_head(pkt, &tmp8, 1);
				}
				if (!cfpkt_more(pkt)) {
					cfpkt_destroy(pkt);
					layr->incomplete_frm = NULL;
					spin_unlock(&layr->sync);
					return -EPROTO;
				}
			}
		}

		pkt_len = cfpkt_getlen(pkt);

		/*
		 *  pkt_len is the accumulated length of the packet data
		 *  we have received so far.
		 *  Exit if frame doesn't hold length.
		 */

		if (pkt_len < 2) {
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 *  Find length of frame.
		 *  expectlen is the length we need for a full frame.
		 */
		cfpkt_peek_head(pkt, &tmp, 2);
		expectlen = le16_to_cpu(tmp) + 2;
		/*
		 * Frame error handling
		 */
		if (expectlen < SERIAL_MINIUM_PACKET_SIZE
		    || expectlen > SERIAL_MAX_FRAMESIZE) {
			if (!layr->usestx) {
				if (pkt != NULL)
					cfpkt_destroy(pkt);
				layr->incomplete_frm = NULL;
				expectlen = 0;
				spin_unlock(&layr->sync);
				return -EPROTO;
			}
			continue;
		}

		if (pkt_len < expectlen) {
			/* Too little received data */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}

		/*
		 * Enough data for at least one frame.
		 * Split the frame, if too long
		 */
		if (pkt_len > expectlen)
			tail_pkt = cfpkt_split(pkt, expectlen);
		else
			tail_pkt = NULL;

		/* Send the first part of packet upwards. */
		spin_unlock(&layr->sync);
		ret = layr->layer.up->receive(layr->layer.up, pkt);
		spin_lock(&layr->sync);
		if (ret == -EILSEQ) {
			if (layr->usestx) {
				if (tail_pkt != NULL)
					pkt = cfpkt_append(pkt, tail_pkt, 0);
				/* Start search for next STX if frame failed */
				continue;
			} else {
				cfpkt_destroy(pkt);
				pkt = NULL;
			}
		}

		pkt = tail_pkt;

	} while (pkt != NULL);

	spin_unlock(&layr->sync);
	return 0;
}
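
/*
 * Illustrative sketch, not part of the original file: the on-the-wire
 * layout that cfserl_receive() parses.  A frame is an optional CFSERL_STX
 * byte (0x02, when usestx is set), a 2-byte little-endian length covering
 * the bytes that follow it, and then the frame body; the length field
 * plus body (expectlen above) must lie between SERIAL_MINIUM_PACKET_SIZE
 * and SERIAL_MAX_FRAMESIZE.  The helper below is hypothetical and uses a
 * flat buffer instead of the cfpkt helpers used in the real code.
 */
#if 0	/* example only, never compiled */
static size_t example_build_serial_frame(u8 *buf, size_t buflen,
					 const u8 *body, u16 body_len,
					 bool use_stx)
{
	size_t need = (use_stx ? 1 : 0) + 2 + body_len;
	size_t off = 0;

	if (buflen < need)
		return 0;
	if (use_stx)
		buf[off++] = CFSERL_STX;	/* start-of-frame marker */
	buf[off++] = body_len & 0xff;		/* length, little-endian */
	buf[off++] = body_len >> 8;
	memcpy(&buf[off], body, body_len);	/* frame body */
	return off + body_len;
}
#endif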

static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(layer);
	u8 tmp8 = CFSERL_STX;

	if (layr->usestx)
		cfpkt_add_head(newpkt, &tmp8, 1);
	return layer->dn->transmit(layer->dn, newpkt);
}
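
/*
 * Illustrative sketch, not part of the original file: the transmit path
 * as seen from a layer stacked on top of this one.  cfserl_transmit()
 * only prepends the CFSERL_STX marker (when usestx is set); any length
 * header is assumed to be present already in the packet handed down by
 * the upper layer.  "upper" is a hypothetical layer whose ->dn points at
 * the cflayer returned by cfserl_create().
 */
#if 0	/* example only, never compiled */
static int example_transmit_via_serial(struct cflayer *upper, struct cfpkt *pkt)
{
	/* Ends up in cfserl_transmit(), then in the physical layer below. */
	return upper->dn->transmit(upper->dn, pkt);
}
#endif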

static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid)
{
	layr->up->ctrlcmd(layr->up, ctrl, phyid);
}
