This source file includes following definitions.
- fill_match_fields
- fill_action_fields
- cxgb4_config_knode
- cxgb4_delete_knode
- cxgb4_cleanup_tc_u32
- cxgb4_init_tc_u32
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35 #include <net/tc_act/tc_gact.h>
36 #include <net/tc_act/tc_mirred.h>
37
38 #include "cxgb4.h"
39 #include "cxgb4_tc_u32_parse.h"
40 #include "cxgb4_tc_u32.h"
41
42
43 static int fill_match_fields(struct adapter *adap,
44 struct ch_filter_specification *fs,
45 struct tc_cls_u32_offload *cls,
46 const struct cxgb4_match_field *entry,
47 bool next_header)
48 {
49 unsigned int i, j;
50 u32 val, mask;
51 int off, err;
52 bool found;
53
54 for (i = 0; i < cls->knode.sel->nkeys; i++) {
55 off = cls->knode.sel->keys[i].off;
56 val = cls->knode.sel->keys[i].val;
57 mask = cls->knode.sel->keys[i].mask;
58
59 if (next_header) {
60
61 if (!cls->knode.sel->keys[i].offmask)
62 continue;
63 } else {
64
65 if (cls->knode.sel->keys[i].offmask)
66 continue;
67 }
68
69 found = false;
70
71 for (j = 0; entry[j].val; j++) {
72 if (off == entry[j].off) {
73 found = true;
74 err = entry[j].val(fs, val, mask);
75 if (err)
76 return err;
77 break;
78 }
79 }
80
81 if (!found)
82 return -EINVAL;
83 }
84
85 return 0;
86 }
87
88
89 static int fill_action_fields(struct adapter *adap,
90 struct ch_filter_specification *fs,
91 struct tc_cls_u32_offload *cls)
92 {
93 unsigned int num_actions = 0;
94 const struct tc_action *a;
95 struct tcf_exts *exts;
96 int i;
97
98 exts = cls->knode.exts;
99 if (!tcf_exts_has_actions(exts))
100 return -EINVAL;
101
102 tcf_exts_for_each_action(i, a, exts) {
103
104 if (num_actions)
105 return -EINVAL;
106
107
108 if (is_tcf_gact_shot(a)) {
109 fs->action = FILTER_DROP;
110 num_actions++;
111 continue;
112 }
113
114
115 if (is_tcf_mirred_egress_redirect(a)) {
116 struct net_device *n_dev, *target_dev;
117 bool found = false;
118 unsigned int i;
119
120 target_dev = tcf_mirred_dev(a);
121 for_each_port(adap, i) {
122 n_dev = adap->port[i];
123 if (target_dev == n_dev) {
124 fs->action = FILTER_SWITCH;
125 fs->eport = i;
126 found = true;
127 break;
128 }
129 }
130
131
132
133
134 if (!found)
135 return -EINVAL;
136
137 num_actions++;
138 continue;
139 }
140
141
142 return -EINVAL;
143 }
144
145 return 0;
146 }
147
/* Install a u32 classifier knode as a hardware filter.
 *
 * Two flavours of knode are handled:
 *  - a "link" knode (cls->knode.link_handle set): records the jump target
 *    (next-header match-field table) and base filter spec in the driver's
 *    tc_u32 table instead of programming a filter;
 *  - a leaf knode: builds a full ch_filter_specification (optionally
 *    layered on top of a previously recorded link) and programs it at
 *    the location encoded in the handle via cxgb4_set_filter().
 *
 * Only ETH_P_IP and ETH_P_IPV6 are offloadable.  Returns 0 on success or
 * a negative errno (-EOPNOTSUPP, -ERANGE, -EINVAL, or the error from
 * cxgb4_set_filter()).
 */
int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
	const struct cxgb4_match_field *start, *link_start = NULL;
	struct adapter *adapter = netdev2adap(dev);
	__be16 protocol = cls->common.protocol;
	struct ch_filter_specification fs;
	struct cxgb4_tc_u32_table *t;
	struct cxgb4_link *link;
	unsigned int filter_id;
	u32 uhtid, link_uhtid;
	bool is_ipv6 = false;
	int ret;

	if (!can_tc_u32_offload(dev))
		return -EOPNOTSUPP;

	if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
		return -EOPNOTSUPP;

	/* The filter location (ftid) is the node id from the handle. */
	filter_id = cls->knode.handle & 0xFFFFF;

	/* NOTE(review): valid ftids appear to be 0..nftids-1 elsewhere in
	 * the driver, so this bound looks off by one ('>' admits
	 * filter_id == nftids) — confirm against cxgb4_set_filter().
	 */
	if (filter_id > adapter->tids.nftids) {
		dev_err(adapter->pdev_dev,
			"Location %d out of range for insertion. Max: %d\n",
			filter_id, adapter->tids.nftids);
		return -ERANGE;
	}

	t = adapter->tc_u32;
	uhtid = TC_U32_USERHTID(cls->knode.handle);
	link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);

	/* uhtid 0x800 is the root hash table; any other uhtid indexes the
	 * driver's link table (1-based) and must fit in it.
	 */
	if (uhtid != 0x800 && uhtid >= t->size)
		return -EINVAL;

	/* A link target must also fit in the link table. */
	if (link_uhtid >= t->size)
		return -EINVAL;

	memset(&fs, 0, sizeof(fs));

	if (protocol == htons(ETH_P_IPV6)) {
		start = cxgb4_ipv6_fields;
		is_ipv6 = true;
	} else {
		start = cxgb4_ipv4_fields;
		is_ipv6 = false;
	}

	if (uhtid != 0x800) {
		/* A non-root knode must hang off a previously created
		 * link; reject it otherwise.
		 */
		if (!t->table[uhtid - 1].link_handle)
			return -EINVAL;

		/* Match fields for the next-header pass come from the
		 * link's recorded jump table.
		 */
		link_start = t->table[uhtid - 1].match_field;
		if (!link_start)
			return -EINVAL;
	}

	/* Creation of a link: validate the selector against the known
	 * next-header jump definitions and record it; no filter is
	 * programmed yet.
	 */
	if (link_uhtid) {
		const struct cxgb4_next_header *next;
		bool found = false;
		unsigned int i, j;
		u32 val, mask;
		int off;

		/* Each link slot can only be claimed once. */
		if (t->table[link_uhtid - 1].link_handle) {
			dev_err(adapter->pdev_dev,
				"Link handle exists for: 0x%x\n",
				link_uhtid);
			return -EINVAL;
		}

		next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;

		/* Try to find a matching jump entry: the selector's
		 * offset/shift/mask must match, and at least one key must
		 * match the jump's discriminator (e.g. the IP protocol).
		 */
		for (i = 0; next[i].jump; i++) {
			if (next[i].offoff != cls->knode.sel->offoff ||
			    next[i].shift != cls->knode.sel->offshift ||
			    next[i].mask != cls->knode.sel->offmask ||
			    next[i].offset != cls->knode.sel->off)
				continue;

			/* Found a possible candidate.  Find a key that
			 * matches the corresponding offset, value, and
			 * mask to jump to the next header.
			 */
			for (j = 0; j < cls->knode.sel->nkeys; j++) {
				off = cls->knode.sel->keys[j].off;
				val = cls->knode.sel->keys[j].val;
				mask = cls->knode.sel->keys[j].mask;

				if (next[i].match_off == off &&
				    next[i].match_val == val &&
				    next[i].match_mask == mask) {
					found = true;
					break;
				}
			}

			if (!found)
				continue;

			/* The link's own match keys become the base filter
			 * spec that leaf knodes will later layer onto.
			 */
			ret = fill_match_fields(adapter, &fs, cls,
						start, false);
			if (ret)
				goto out;

			link = &t->table[link_uhtid - 1];
			link->match_field = next[i].jump;
			link->link_handle = cls->knode.handle;
			memcpy(&link->fs, &fs, sizeof(fs));
			break;
		}

		/* No candidate jump entry matched the selector. */
		if (!found)
			return -EINVAL;

		return 0;
	}

	/* Leaf knode hanging off a link: start from the link's recorded
	 * base spec, then fold in this knode's next-header keys.
	 */
	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
		/* Apply the rule against the link's base spec. */
		memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
		ret = fill_match_fields(adapter, &fs, cls,
					link_start, true);
		if (ret)
			goto out;
	}

	ret = fill_match_fields(adapter, &fs, cls, start, false);
	if (ret)
		goto out;

	/* Translate the single tc action (drop or redirect) into the
	 * filter spec.
	 */
	ret = fill_action_fields(adapter, &fs, cls);
	if (ret)
		goto out;

	/* Pin the rule to the ingress port the netdev represents so it
	 * does not match traffic arriving on other ports.
	 */
	fs.val.iport = netdev2pinfo(dev)->port_id;
	fs.mask.iport = ~0;

	/* Enable hit counters for this filter. */
	fs.hitcnts = 1;

	/* 1 selects the IPv6 filter format, 0 the IPv4 one. */
	fs.type = is_ipv6 ? 1 : 0;

	/* Program the filter at the requested location. */
	ret = cxgb4_set_filter(dev, filter_id, &fs);
	if (ret)
		goto out;

	/* Remember which ftids belong to this link so that deleting the
	 * link can tear them all down (see cxgb4_delete_knode()).
	 */
	if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
		set_bit(filter_id, t->table[uhtid - 1].tid_map);

out:
	return ret;
}
339
/* Remove a u32 classifier knode from hardware.
 *
 * Deletes the filter at the location encoded in the handle, clears the
 * ftid from its owning link's bitmap (when the knode hangs off a link),
 * and — when the handle itself identifies a link — also deletes every
 * filter recorded in that link's tid_map and resets the link entry.
 *
 * Returns 0 on success or a negative errno.
 */
int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int filter_id, max_tids, i, j;
	struct cxgb4_link *link = NULL;
	struct cxgb4_tc_u32_table *t;
	u32 handle, uhtid;
	int ret;

	if (!can_tc_u32_offload(dev))
		return -EOPNOTSUPP;

	/* The filter location (ftid) is the node id from the handle. */
	filter_id = cls->knode.handle & 0xFFFFF;

	/* NOTE(review): same bound as cxgb4_config_knode() — '>' admits
	 * filter_id == nftids, which looks one past the last valid ftid;
	 * confirm against the filter table layout.
	 */
	if (filter_id > adapter->tids.nftids) {
		dev_err(adapter->pdev_dev,
			"Location %d out of range for deletion. Max: %d\n",
			filter_id, adapter->tids.nftids);
		return -ERANGE;
	}

	t = adapter->tc_u32;
	handle = cls->knode.handle;
	uhtid = TC_U32_USERHTID(cls->knode.handle);

	/* uhtid 0x800 is the root hash table; any other uhtid indexes the
	 * driver's link table (1-based) and must fit in it.
	 */
	if (uhtid != 0x800 && uhtid >= t->size)
		return -EINVAL;

	/* A knode under a link must reference a live link that actually
	 * recorded this ftid; otherwise it was never installed by us.
	 */
	if (uhtid != 0x800) {
		link = &t->table[uhtid - 1];
		if (!link->link_handle)
			return -EINVAL;

		if (!test_bit(filter_id, link->tid_map))
			return -EINVAL;
	}

	ret = cxgb4_del_filter(dev, filter_id, NULL);
	if (ret)
		goto out;

	if (link)
		clear_bit(filter_id, link->tid_map);

	/* If the handle being deleted is itself a link, tear down every
	 * filter that was installed under it and reset the link entry.
	 */
	max_tids = adapter->tids.nftids;
	for (i = 0; i < t->size; i++) {
		link = &t->table[i];

		if (link->link_handle == handle) {
			for (j = 0; j < max_tids; j++) {
				if (!test_bit(j, link->tid_map))
					continue;

				ret = __cxgb4_del_filter(dev, j, NULL, NULL);
				if (ret)
					goto out;

				clear_bit(j, link->tid_map);
			}

			/* Clear the link state so the slot can be reused. */
			link->match_field = NULL;
			link->link_handle = 0;
			memset(&link->fs, 0, sizeof(link->fs));
			break;
		}
	}

out:
	return ret;
}
419
420 void cxgb4_cleanup_tc_u32(struct adapter *adap)
421 {
422 struct cxgb4_tc_u32_table *t;
423 unsigned int i;
424
425 if (!adap->tc_u32)
426 return;
427
428
429 t = adap->tc_u32;
430 for (i = 0; i < t->size; i++) {
431 struct cxgb4_link *link = &t->table[i];
432
433 kvfree(link->tid_map);
434 }
435 kvfree(adap->tc_u32);
436 }
437
438 struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
439 {
440 unsigned int max_tids = adap->tids.nftids;
441 struct cxgb4_tc_u32_table *t;
442 unsigned int i;
443
444 if (!max_tids)
445 return NULL;
446
447 t = kvzalloc(struct_size(t, table, max_tids), GFP_KERNEL);
448 if (!t)
449 return NULL;
450
451 t->size = max_tids;
452
453 for (i = 0; i < t->size; i++) {
454 struct cxgb4_link *link = &t->table[i];
455 unsigned int bmap_size;
456
457 bmap_size = BITS_TO_LONGS(max_tids);
458 link->tid_map = kvcalloc(bmap_size, sizeof(unsigned long),
459 GFP_KERNEL);
460 if (!link->tid_map)
461 goto out_no_mem;
462 bitmap_zero(link->tid_map, max_tids);
463 }
464
465 return t;
466
467 out_no_mem:
468 for (i = 0; i < t->size; i++) {
469 struct cxgb4_link *link = &t->table[i];
470
471 if (link->tid_map)
472 kvfree(link->tid_map);
473 }
474
475 if (t)
476 kvfree(t);
477
478 return NULL;
479 }