/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfserl.h>
#define container_obj(layr) ((struct cfserl *) layr)
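/*
 * container_obj() relies on the 'layer' member being placed first in
 * struct cfserl; cfserl_create() asserts this with offsetof() below.
 */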
#define CFSERL_STX 0x02
#define SERIAL_MINIMUM_PACKET_SIZE 4
#define SERIAL_MAX_FRAMESIZE 4096
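/*
 * On-the-wire framing, as implied by cfserl_receive() below: an optional
 * STX (0x02) start byte, followed by a little-endian 16-bit length field,
 * followed by payload. The length value plus the two length bytes gives
 * the expected frame size, which must fall within
 * [SERIAL_MINIMUM_PACKET_SIZE, SERIAL_MAX_FRAMESIZE].
 */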
struct cfserl {
	struct cflayer layer;
	struct cfpkt *incomplete_frm;
	/* Protects parallel processing of incoming packets */
	spinlock_t sync;
	bool usestx;
};
#define STXLEN(layr) (layr->usestx ? 1 : 0)
static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid);
struct cflayer *cfserl_create(int type, int instance, bool use_stx)
{
	struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
	if (!this) {
		pr_warning("CAIF: %s(): Out of memory\n", __func__);
		return NULL;
	}
	caif_assert(offsetof(struct cfserl, layer) == 0);
	memset(this, 0, sizeof(struct cfserl));
	this->layer.receive = cfserl_receive;
	this->layer.transmit = cfserl_transmit;
	this->layer.ctrlcmd = cfserl_ctrlcmd;
	this->layer.type = type;
	this->usestx = use_stx;
	spin_lock_init(&this->sync);
	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
	return &this->layer;
}
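/*
 * Deframing: append new data to any buffered partial frame, then loop:
 * hunt for STX (when enabled), read the length field, and either buffer
 * the data until a full frame has arrived or split off complete frames
 * and pass them to the layer above.
 */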
static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(l);
	u16 pkt_len;
	struct cfpkt *pkt = NULL;
	struct cfpkt *tail_pkt = NULL;
	u8 tmp8;
	u16 tmp;
	u8 stx = CFSERL_STX;
	int ret;
	u16 expectlen = 0;

	caif_assert(newpkt != NULL);
	spin_lock(&layr->sync);
	if (layr->incomplete_frm != NULL) {
		layr->incomplete_frm =
		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
		pkt = layr->incomplete_frm;
		if (pkt == NULL) {
			spin_unlock(&layr->sync);
			return -ENOMEM;
		}
	} else {
		pkt = newpkt;
	}
	layr->incomplete_frm = NULL;
	do {
		/* Search for STX at start of pkt if STX is used */
		if (layr->usestx) {
			cfpkt_extr_head(pkt, &tmp8, 1);
			if (tmp8 != CFSERL_STX) {
				while (cfpkt_more(pkt)
				       && tmp8 != CFSERL_STX)
					cfpkt_extr_head(pkt, &tmp8, 1);
				if (!cfpkt_more(pkt)) {
					cfpkt_destroy(pkt);
					layr->incomplete_frm = NULL;
					spin_unlock(&layr->sync);
					return -EPROTO;
				}
			}
		}
		pkt_len = cfpkt_getlen(pkt);

		/*
		 * pkt_len is the accumulated length of the packet data
		 * we have received so far.
		 * Exit if frame doesn't hold length.
		 */
		if (pkt_len < 2) {
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}
		/*
		 * Find length of frame.
		 * expectlen is the length we need for a full frame.
		 */
		cfpkt_peek_head(pkt, &tmp, 2);
		expectlen = le16_to_cpu(tmp) + 2;

		/*
		 * Frame error handling
		 */
		if (expectlen < SERIAL_MINIMUM_PACKET_SIZE
		    || expectlen > SERIAL_MAX_FRAMESIZE) {
			if (!layr->usestx) {
				/* Without STX there is no way to resync */
				cfpkt_destroy(pkt);
				layr->incomplete_frm = NULL;
				spin_unlock(&layr->sync);
				return -EPROTO;
			}
			/* With STX, restart the STX hunt on the same data */
			continue;
		}
		if (pkt_len < expectlen) {
			/* Too little received data */
			if (layr->usestx)
				cfpkt_add_head(pkt, &stx, 1);
			layr->incomplete_frm = pkt;
			spin_unlock(&layr->sync);
			return 0;
		}
		/*
		 * Enough data for at least one frame.
		 * Split the frame, if too long.
		 */
		if (pkt_len > expectlen)
			tail_pkt = cfpkt_split(pkt, expectlen);
		else
			tail_pkt = NULL;
		/* Send the first part of packet upwards. */
		spin_unlock(&layr->sync);
		ret = layr->layer.up->receive(layr->layer.up, pkt);
		spin_lock(&layr->sync);
		if (ret == -EILSEQ) {
			if (layr->usestx) {
				if (tail_pkt != NULL)
					pkt = cfpkt_append(pkt, tail_pkt, 0);
				/* Start search for next STX if frame failed */
				continue;
			}
			cfpkt_destroy(pkt);
		}

		pkt = tail_pkt;
	} while (pkt != NULL);

	spin_unlock(&layr->sync);
	return 0;
}
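/*
 * Transmit: prepend the STX byte when framing is enabled and hand the
 * packet to the layer below; on error the STX is stripped again so the
 * caller gets its packet back unchanged.
 */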
static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
{
	struct cfserl *layr = container_obj(layer);
	int ret;
	u8 tmp8 = CFSERL_STX;
	if (layr->usestx)
		cfpkt_add_head(newpkt, &tmp8, 1);
	ret = layer->dn->transmit(layer->dn, newpkt);
	if (ret < 0)
		cfpkt_extr_head(newpkt, &tmp8, 1);
	return ret;
}
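/* Control/flow indications are forwarded unmodified to the layer above. */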
static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid)
{
	layr->up->ctrlcmd(layr->up, ctrl, phyid);
}