rfml               37 net/caif/cfrfml.c 	struct cfrfml *rfml = container_obj(&srvl->layer);
rfml               39 net/caif/cfrfml.c 	if (rfml->incomplete_frm)
rfml               40 net/caif/cfrfml.c 		cfpkt_destroy(rfml->incomplete_frm);
rfml               71 net/caif/cfrfml.c static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
rfml               82 net/caif/cfrfml.c 	if (memcmp(seghead, rfml->seghead, 6) != 0)
rfml               85 net/caif/cfrfml.c 	tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
rfml               86 net/caif/cfrfml.c 			rfml->pdu_size + RFM_HEAD_SIZE);
rfml              103 net/caif/cfrfml.c 	struct cfrfml *rfml;
rfml              108 net/caif/cfrfml.c 	rfml = container_obj(layr);
rfml              109 net/caif/cfrfml.c 	spin_lock(&rfml->sync);
rfml              117 net/caif/cfrfml.c 		if (rfml->incomplete_frm == NULL) {
rfml              119 net/caif/cfrfml.c 			if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
rfml              122 net/caif/cfrfml.c 			rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
rfml              126 net/caif/cfrfml.c 			rfml->incomplete_frm = pkt;
rfml              130 net/caif/cfrfml.c 			tmppkt = rfm_append(rfml, seghead, pkt, &err);
rfml              137 net/caif/cfrfml.c 			rfml->incomplete_frm = tmppkt;
rfml              147 net/caif/cfrfml.c 	if (rfml->incomplete_frm) {
rfml              150 net/caif/cfrfml.c 		tmppkt = rfm_append(rfml, seghead, pkt, &err);
rfml              157 net/caif/cfrfml.c 		rfml->incomplete_frm = NULL;
rfml              163 net/caif/cfrfml.c 		if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
rfml              167 net/caif/cfrfml.c 	err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);
rfml              176 net/caif/cfrfml.c 		if (rfml->incomplete_frm)
rfml              177 net/caif/cfrfml.c 			cfpkt_destroy(rfml->incomplete_frm);
rfml              178 net/caif/cfrfml.c 		rfml->incomplete_frm = NULL;
rfml              184 net/caif/cfrfml.c 					rfml->serv.dev_info.id);
rfml              186 net/caif/cfrfml.c 	spin_unlock(&rfml->sync);
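
The receive-path lines above (cfrfml_release, rfm_append and cfrfml_receive) trace the RFM reassembly state kept in struct cfrfml: the 6-byte segment head cached in rfml->seghead, the expected length in rfml->pdu_size, and the partially rebuilt frame in rfml->incomplete_frm, all guarded by rfml->sync. The following is a minimal, self-contained sketch of that reassembly pattern over plain buffers rather than cfpkt packets; struct rfm_reasm and rfm_feed() are hypothetical illustration names, not kernel APIs, and error handling is reduced to a single return code.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define SEGHEAD_LEN   6
#define RFM_HEAD_SIZE 7			/* value used by net/caif/cfrfml.c */

struct rfm_reasm {
	uint8_t  seghead[SEGHEAD_LEN];	/* head cached from the first segment */
	uint16_t pdu_size;		/* expected size, read from seghead[4..5] */
	uint8_t *buf;			/* reassembled frame */
	size_t   len;
	bool     active;		/* an incomplete frame is pending */
};

/*
 * Feed one received segment; 'segmented' is the 1-byte flag already taken
 * off the front, as cfrfml_receive() does with cfpkt_extr_head().
 * Returns 0 on success, -1 on a protocol error; *done is set once the
 * complete PDU sits in r->buf (the point where the kernel calls up->receive).
 */
static int rfm_feed(struct rfm_reasm *r, const uint8_t *seg, size_t seg_len,
		    bool segmented, bool *done)
{
	const uint8_t *payload = seg;
	size_t payload_len = seg_len;
	uint8_t *nbuf;

	*done = false;
	if (seg_len < SEGHEAD_LEN)
		return -1;

	if (!r->active && !segmented) {
		/* Unsegmented frame with nothing pending: deliver as-is. */
		nbuf = realloc(r->buf, seg_len);
		if (!nbuf)
			return -1;
		r->buf = nbuf;
		memcpy(r->buf, seg, seg_len);
		r->len = seg_len;
		*done = true;
		return 0;
	}

	if (!r->active) {
		/* Initial segment: cache the head and read the PDU size,
		 * mirroring cfpkt_peek_head() + get_unaligned_le16(). */
		memcpy(r->seghead, seg, SEGHEAD_LEN);
		r->pdu_size = (uint16_t)(seg[4] | (seg[5] << 8));
		r->len = 0;
		r->active = true;
	} else {
		/* Follow-up segment: the head must match the cached one
		 * (the memcmp() in rfm_append()) and is stripped before
		 * appending, like cfpkt_extr_head() + cfpkt_append(). */
		if (memcmp(seg, r->seghead, SEGHEAD_LEN) != 0)
			return -1;
		payload += SEGHEAD_LEN;
		payload_len -= SEGHEAD_LEN;
	}

	nbuf = realloc(r->buf, r->len + payload_len);
	if (!nbuf)
		return -1;
	r->buf = nbuf;
	memcpy(r->buf + r->len, payload, payload_len);
	r->len += payload_len;

	if (!segmented) {
		/* Last segment: same length check as the kernel,
		 * pdu_size != getlen(pkt) - RFM_HEAD_SIZE + 1. */
		r->active = false;
		if (r->pdu_size != r->len - RFM_HEAD_SIZE + 1)
			return -1;
		*done = true;
	}
	return 0;
}

On any error the kernel additionally destroys the pending incomplete_frm and signals a remote-shutdown indication upwards before releasing rfml->sync, which is the cleanup visible in the error-path lines above.
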
rfml              196 net/caif/cfrfml.c static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
rfml              198 net/caif/cfrfml.c 	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size + RFM_HEAD_SIZE);
rfml              201 net/caif/cfrfml.c 	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
rfml              208 net/caif/cfrfml.c 	cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;
rfml              210 net/caif/cfrfml.c 	return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
rfml              220 net/caif/cfrfml.c 	struct cfrfml *rfml = container_obj(layr);
rfml              225 net/caif/cfrfml.c 	if (!cfsrvl_ready(&rfml->serv, &err))
rfml              233 net/caif/cfrfml.c 	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
rfml              239 net/caif/cfrfml.c 	while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
rfml              253 net/caif/cfrfml.c 		rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
rfml              257 net/caif/cfrfml.c 		err = cfrfml_transmit_segment(rfml, frontpkt);
rfml              279 net/caif/cfrfml.c 	err = cfrfml_transmit_segment(rfml, frontpkt);
rfml              289 net/caif/cfrfml.c 					rfml->serv.dev_info.id);
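
The transmit-path lines (cfrfml_transmit_segment and cfrfml_transmit) show the opposite operation: as long as the outgoing frame exceeds rfml->fragment_size + RFM_HEAD_SIZE, a segmentation flag of 1 is pushed on the front, the packet is split at fragment_size with cfpkt_split(), the front part is handed to cfrfml_transmit_segment(), and the 6-byte head is copied back onto the remainder; the final piece goes out with flag 0. Below is a hedged sketch of that loop over a flat buffer; rfm_fragment(), send_segment_fn and MAX_FRAG are illustrative names only, and the kernel's exact size accounting on cfpkt buffers is simplified.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SEGHEAD_LEN 6
#define MAX_FRAG    4096		/* illustrative bound on fragment_size */

/* Callback standing in for cfrfml_transmit_segment(): in the kernel it also
 * stamps channel_id and dev_info on the packet before passing it down. */
typedef int (*send_segment_fn)(const uint8_t *seg, size_t len);

static int rfm_fragment(const uint8_t *pdu, size_t pdu_len,
			size_t fragment_size, send_segment_fn send)
{
	uint8_t seg[1 + SEGHEAD_LEN + MAX_FRAG];
	size_t off = 0;
	int first = 1;

	if (pdu_len < SEGHEAD_LEN || fragment_size == 0 ||
	    fragment_size > MAX_FRAG)
		return -1;

	for (;;) {
		int last = (pdu_len - off <= fragment_size);
		size_t chunk = last ? pdu_len - off : fragment_size;
		size_t n = 0;
		int err;

		seg[n++] = last ? 0 : 1;	/* segmentation flag */
		if (!first) {
			/* Copy the PDU head back in front of the payload,
			 * as cfpkt_add_head(frontpkt, head, 6) does. */
			memcpy(&seg[n], pdu, SEGHEAD_LEN);
			n += SEGHEAD_LEN;
		}
		memcpy(&seg[n], pdu + off, chunk);

		err = send(seg, n + chunk);
		if (err)
			return err;
		if (last)
			return 0;

		off += chunk;
		first = 0;
	}
}

Each piece produced this way stays below fragment_size + RFM_HEAD_SIZE, which is exactly the invariant that the caif_assert() in cfrfml_transmit_segment() checks, and the receive-side sketch earlier inverts these steps to rebuild the PDU once the flag-0 segment arrives.
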