/*
 * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <asm/byteorder.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>

#include "gdm_wimax.h"
#include "hci.h"
#include "gdm_qos.h"

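/*
 * Cache of qos_entry_s objects recycled from the TX path. Keeping up to
 * MAX_FREE_LIST_CNT entries parked here lets the common case skip a
 * kmalloc(GFP_ATOMIC)/kfree() round trip per packet.
 */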
#define MAX_FREE_LIST_CNT		32
static struct {
	struct list_head head;
	int cnt;
	spinlock_t lock;
} qos_free_list;

static void init_qos_entry_list(void)
{
	qos_free_list.cnt = 0;
	INIT_LIST_HEAD(&qos_free_list.head);
	spin_lock_init(&qos_free_list.lock);
}

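/*
 * Take an entry from the cache if one is available, else fall back to an
 * atomic allocation. Like kmalloc(), this can return NULL; callers must
 * check the result before touching the entry.
 */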
static void *alloc_qos_entry(void)
{
	struct qos_entry_s *entry;
	unsigned long flags;

	spin_lock_irqsave(&qos_free_list.lock, flags);
	if (qos_free_list.cnt) {
		entry = list_entry(qos_free_list.head.prev, struct qos_entry_s,
				   list);
		list_del(&entry->list);
		qos_free_list.cnt--;
		spin_unlock_irqrestore(&qos_free_list.lock, flags);
		return entry;
	}
	spin_unlock_irqrestore(&qos_free_list.lock, flags);

	return kmalloc(sizeof(*entry), GFP_ATOMIC);
}

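/*
 * Park an entry back on the cache, or kfree() it once the cache already
 * holds MAX_FREE_LIST_CNT entries.
 */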
static void free_qos_entry(void *entry)
{
	struct qos_entry_s *qentry = entry;
	unsigned long flags;

	spin_lock_irqsave(&qos_free_list.lock, flags);
	if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
		list_add(&qentry->list, &qos_free_list.head);
		qos_free_list.cnt++;
		spin_unlock_irqrestore(&qos_free_list.lock, flags);
		return;
	}
	spin_unlock_irqrestore(&qos_free_list.lock, flags);

	kfree(entry);
}

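/*
 * kfree() every entry on @free_list. Used when a service flow is torn
 * down or the whole list is released, so the entries bypass the cache.
 */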
static void free_qos_entry_list(struct list_head *free_list)
{
	struct qos_entry_s *entry, *n;
	int total_free = 0;

	list_for_each_entry_safe(entry, n, free_list, list) {
		list_del(&entry->list);
		kfree(entry);
		total_free++;
	}

	pr_debug("%s: total_free_cnt=%d\n", __func__, total_free);
}

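/*
 * Initialize the per-NIC QoS control block: clear every classifier slot,
 * reset the counters, and set up the queue lock and the shared entry
 * cache.
 */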
void gdm_qos_init(void *nic_ptr)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	int i;

	for (i = 0; i < QOS_MAX; i++) {
		INIT_LIST_HEAD(&qcb->qos_list[i]);
		qcb->csr[i].qos_buf_count = 0;
		qcb->csr[i].enabled = false;
	}

	qcb->qos_list_cnt = 0;
	qcb->qos_null_idx = QOS_MAX - 1;
	qcb->qos_limit_size = 255;

	spin_lock_init(&qcb->qos_lock);

	init_qos_entry_list();
}

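/*
 * Disable every service flow and drop all packets still queued on any of
 * them. The entries are moved to a private list under the lock and freed
 * only after it has been released.
 */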
void gdm_qos_release_list(void *nic_ptr)
{
	struct nic *nic = nic_ptr;
	struct qos_cb_s *qcb = &nic->qos;
	unsigned long flags;
	struct qos_entry_s *entry, *n;
	struct list_head free_list;
	int i;

	INIT_LIST_HEAD(&free_list);

	spin_lock_irqsave(&qcb->qos_lock, flags);

	for (i = 0; i < QOS_MAX; i++) {
		qcb->csr[i].qos_buf_count = 0;
		qcb->csr[i].enabled = false;
	}

	qcb->qos_list_cnt = 0;
	qcb->qos_null_idx = QOS_MAX - 1;

	for (i = 0; i < QOS_MAX; i++) {
		list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) {
			list_move_tail(&entry->list, &free_list);
		}
	}
	spin_unlock_irqrestore(&qcb->qos_lock, flags);
	free_qos_entry_list(&free_list);
}

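/*
 * Match one IPv4 packet against a classifier rule. @stream points at the
 * IPv4 header (stream[1] is the TOS byte, stream[9] the protocol,
 * stream[12..15]/stream[16..19] the source/destination addresses) and
 * @port at the TCP/UDP header, whose first four bytes are the big-endian
 * source and destination ports. Returns 0 on a match, 1 on a mismatch.
 */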
static int chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *stream, u8 *port)
{
	int i;

	if (csr->classifier_rule_en & IPTYPEOFSERVICE) {
		if (((stream[1] & csr->ip2s_mask) < csr->ip2s_lo) ||
		    ((stream[1] & csr->ip2s_mask) > csr->ip2s_hi))
			return 1;
	}

	if (csr->classifier_rule_en & PROTOCOL) {
		if (stream[9] != csr->protocol)
			return 1;
	}

	if (csr->classifier_rule_en & IPMASKEDSRCADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((stream[12 + i] & csr->ipsrc_addrmask[i]) !=
			    (csr->ipsrc_addr[i] & csr->ipsrc_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & IPMASKEDDSTADDRESS) {
		for (i = 0; i < 4; i++) {
			if ((stream[16 + i] & csr->ipdst_addrmask[i]) !=
			    (csr->ipdst_addr[i] & csr->ipdst_addrmask[i]))
				return 1;
		}
	}

	if (csr->classifier_rule_en & PROTOCOLSRCPORTRANGE) {
		i = ((port[0] << 8) & 0xff00) + port[1];
		if ((i < csr->srcport_lo) || (i > csr->srcport_hi))
			return 1;
	}

	if (csr->classifier_rule_en & PROTOCOLDSTPORTRANGE) {
		i = ((port[2] << 8) & 0xff00) + port[3];
		if ((i < csr->dstport_lo) || (i > csr->dstport_hi))
			return 1;
	}

	return 0;
}

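/*
 * Return the index of the first enabled service flow whose classifier
 * matches the packet, or -1 when the packet is not IPv4 or nothing
 * matches; the caller then falls back to the null service flow.
 */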
static int get_qos_index(struct nic *nic, u8 *iph, u8 *tcpudph)
{
	int ip_ver, i;
	struct qos_cb_s *qcb = &nic->qos;

	if (!iph || !tcpudph)
		return -1;

	ip_ver = (iph[0] >> 4) & 0xf;

	if (ip_ver != 4)
		return -1;

	for (i = 0; i < QOS_MAX; i++) {
		if (!qcb->csr[i].enabled)
			continue;
		if (!qcb->csr[i].classifier_rule_en)
			continue;
		if (chk_ipv4_rule(&qcb->csr[i], iph, tcpudph) == 0)
			return i;
	}

	return -1;
}

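/*
 * Move at most one queued packet per eligible service flow onto @head.
 * A flow is skipped while it is disabled or while its device-side buffer
 * count has reached qos_limit_size. Must be called with qos_lock held.
 */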
static void extract_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry;
	int i;

	INIT_LIST_HEAD(head);

	for (i = 0; i < QOS_MAX; i++) {
		if (!qcb->csr[i].enabled)
			continue;
		if (qcb->csr[i].qos_buf_count >= qcb->qos_limit_size)
			continue;
		if (list_empty(&qcb->qos_list[i]))
			continue;

		entry = list_entry(qcb->qos_list[i].prev, struct qos_entry_s,
				   list);

		list_move_tail(&entry->list, head);
		qcb->csr[i].qos_buf_count++;

		if (!list_empty(&qcb->qos_list[i]))
			netdev_warn(nic->netdev,
				    "queue %d is still backlogged\n", i);
	}
}

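/*
 * Hand every packet on @head to the driver TX path and recycle the
 * entries. Callers drop qos_lock first, so gdm_wimax_send_tx() is never
 * entered under the spinlock.
 */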
static void send_qos_list(struct nic *nic, struct list_head *head)
{
	struct qos_entry_s *entry, *n;

	list_for_each_entry_safe(entry, n, head, list) {
		list_del(&entry->list);
		gdm_wimax_send_tx(entry->skb, entry->dev);
		free_qos_entry(entry);
	}
}

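/*
 * TX entry point. While at least one service flow is configured, IPv4
 * packets are queued on the flow chosen by the classifier and drained
 * through extract_qos_list(); everything else is transmitted directly.
 */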
int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
{
	struct nic *nic = netdev_priv(dev);
	int index;
	struct qos_cb_s *qcb = &nic->qos;
	unsigned long flags;
	struct ethhdr *ethh = (struct ethhdr *)(skb->data + HCI_HEADER_SIZE);
	struct iphdr *iph = (struct iphdr *)((char *)ethh + ETH_HLEN);
	struct tcphdr *tcph;
	struct qos_entry_s *entry = NULL;
	struct list_head send_list;
	int ret = 0;

	/* ihl counts 32-bit words, so advance in bytes, not tcphdr units. */
	tcph = (struct tcphdr *)((char *)iph + iph->ihl * 4);

	if (ethh->h_proto == cpu_to_be16(ETH_P_IP)) {
		if (qcb->qos_list_cnt && !qos_free_list.cnt) {
			entry = alloc_qos_entry();
			if (entry) {
				entry->skb = skb;
				entry->dev = dev;
				netdev_dbg(dev, "qcb->qos_list_cnt=%d\n",
					   qcb->qos_list_cnt);
			}
		}

		spin_lock_irqsave(&qcb->qos_lock, flags);
		if (qcb->qos_list_cnt) {
			index = get_qos_index(nic, (u8 *)iph, (u8 *)tcph);
			if (index == -1)
				index = qcb->qos_null_idx;

			if (!entry) {
				entry = alloc_qos_entry();
				if (!entry) {
					/* No memory for a queue entry;
					 * fall back to a direct send.
					 */
					spin_unlock_irqrestore(&qcb->qos_lock,
							       flags);
					goto send_directly;
				}
				entry->skb = skb;
				entry->dev = dev;
			}

			list_add_tail(&entry->list, &qcb->qos_list[index]);
			extract_qos_list(nic, &send_list);
			spin_unlock_irqrestore(&qcb->qos_lock, flags);
			send_qos_list(nic, &send_list);
			goto out;
		}
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		if (entry)
			free_qos_entry(entry);
	}

send_directly:
	ret = gdm_wimax_send_tx(skb, dev);
out:
	return ret;
}

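/*
 * Look up the CSR slot that carries @sfid. When @mode is non-zero and
 * the SFID is unknown, a disabled slot is claimed and counted as a new
 * service flow. Returns the slot index, or -1 on failure.
 */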
static int get_csr(struct qos_cb_s *qcb, u32 sfid, int mode)
{
	int i;

	/* Enabled slots need not be contiguous, so scan the whole table. */
	for (i = 0; i < QOS_MAX; i++) {
		if (qcb->csr[i].enabled && qcb->csr[i].sfid == sfid)
			return i;
	}

	if (mode) {
		for (i = 0; i < QOS_MAX; i++) {
			if (!qcb->csr[i].enabled) {
				qcb->csr[i].enabled = true;
				qcb->qos_list_cnt++;
				return i;
			}
		}
	}
	return -1;
}

#define QOS_CHANGE_DEL	0xFC
#define QOS_ADD		0xFD
#define QOS_REPORT	0xFE

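/*
 * Handle a QoS HCI event from the device. QOS_REPORT carries one 5-byte
 * record per service flow (a 32-bit big-endian SFID followed by the
 * device-side buffer count). QOS_ADD installs a classifier rule for an
 * SFID; QOS_CHANGE_DEL tears one down and flushes any packets still
 * queued on that flow.
 */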
void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
{
	struct nic *nic = nic_ptr;
	int i, index, pos;
	u32 sfid;
	u8 sub_cmd_evt;
	struct qos_cb_s *qcb = &nic->qos;
	struct qos_entry_s *entry, *n;
	struct list_head send_list;
	struct list_head free_list;
	unsigned long flags;

	sub_cmd_evt = buf[4];

	if (sub_cmd_evt == QOS_REPORT) {
		spin_lock_irqsave(&qcb->qos_lock, flags);
		for (i = 0; i < qcb->qos_list_cnt; i++) {
			sfid = ((buf[(i * 5) + 6] << 24) & 0xff000000);
			sfid += ((buf[(i * 5) + 7] << 16) & 0xff0000);
			sfid += ((buf[(i * 5) + 8] << 8) & 0xff00);
			sfid += buf[(i * 5) + 9];
			index = get_csr(qcb, sfid, 0);
			if (index == -1) {
				spin_unlock_irqrestore(&qcb->qos_lock, flags);
				netdev_err(nic->netdev,
					   "QoS ERROR: unknown SFID 0x%x\n",
					   sfid);
				return;
			}
			qcb->csr[index].qos_buf_count = buf[(i * 5) + 10];
		}

		extract_qos_list(nic, &send_list);
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		send_qos_list(nic, &send_list);
		return;
	}

	/* sub_cmd_evt == QOS_ADD || sub_cmd_evt == QOS_CHANGE_DEL */
	pos = 6;
	sfid = ((buf[pos++] << 24) & 0xff000000);
	sfid += ((buf[pos++] << 16) & 0xff0000);
	sfid += ((buf[pos++] << 8) & 0xff00);
	sfid += buf[pos++];

	index = get_csr(qcb, sfid, 1);
	if (index == -1) {
		netdev_err(nic->netdev,
			   "QoS ERROR: no CSR slot for SFID 0x%x\n", sfid);
		return;
	}

	if (sub_cmd_evt == QOS_ADD) {
		netdev_dbg(nic->netdev, "QOS_ADD SFID = 0x%x, index=%d\n",
			   sfid, index);

		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].sfid = sfid;
		qcb->csr[index].classifier_rule_en =
			((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].classifier_rule_en += buf[pos++];
		if (qcb->csr[index].classifier_rule_en == 0)
			qcb->qos_null_idx = index;
		qcb->csr[index].ip2s_mask = buf[pos++];
		qcb->csr[index].ip2s_lo = buf[pos++];
		qcb->csr[index].ip2s_hi = buf[pos++];
		qcb->csr[index].protocol = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[0] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[1] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[2] = buf[pos++];
		qcb->csr[index].ipsrc_addrmask[3] = buf[pos++];
		qcb->csr[index].ipsrc_addr[0] = buf[pos++];
		qcb->csr[index].ipsrc_addr[1] = buf[pos++];
		qcb->csr[index].ipsrc_addr[2] = buf[pos++];
		qcb->csr[index].ipsrc_addr[3] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[0] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[1] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[2] = buf[pos++];
		qcb->csr[index].ipdst_addrmask[3] = buf[pos++];
		qcb->csr[index].ipdst_addr[0] = buf[pos++];
		qcb->csr[index].ipdst_addr[1] = buf[pos++];
		qcb->csr[index].ipdst_addr[2] = buf[pos++];
		qcb->csr[index].ipdst_addr[3] = buf[pos++];
		qcb->csr[index].srcport_lo = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].srcport_lo += buf[pos++];
		qcb->csr[index].srcport_hi = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].srcport_hi += buf[pos++];
		qcb->csr[index].dstport_lo = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].dstport_lo += buf[pos++];
		qcb->csr[index].dstport_hi = ((buf[pos++] << 8) & 0xff00);
		qcb->csr[index].dstport_hi += buf[pos++];

		/* Share the device buffer budget between the active flows. */
		qcb->qos_limit_size = 254 / qcb->qos_list_cnt;
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
	} else if (sub_cmd_evt == QOS_CHANGE_DEL) {
		netdev_dbg(nic->netdev, "QOS_CHANGE_DEL SFID = 0x%x, index=%d\n",
			   sfid, index);

		INIT_LIST_HEAD(&free_list);

		spin_lock_irqsave(&qcb->qos_lock, flags);
		qcb->csr[index].enabled = false;
		qcb->qos_list_cnt--;
		/* Avoid a divide-by-zero when the last flow goes away. */
		if (qcb->qos_list_cnt)
			qcb->qos_limit_size = 254 / qcb->qos_list_cnt;

		list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
					 list) {
			list_move_tail(&entry->list, &free_list);
		}
		spin_unlock_irqrestore(&qcb->qos_lock, flags);
		free_qos_entry_list(&free_list);
	}
}