This source file includes the following definitions:
- ice_sched_add_root_node
- ice_sched_find_node_by_teid
- ice_aqc_send_sched_elem_cmd
- ice_aq_query_sched_elems
- ice_sched_add_node
- ice_aq_delete_sched_elems
- ice_sched_remove_elems
- ice_sched_get_first_node
- ice_sched_get_tc_node
- ice_free_sched_node
- ice_aq_get_dflt_topo
- ice_aq_add_sched_elems
- ice_aq_suspend_sched_elems
- ice_aq_resume_sched_elems
- ice_aq_query_sched_res
- ice_sched_suspend_resume_elems
- ice_alloc_lan_q_ctx
- ice_sched_clear_agg
- ice_sched_clear_tx_topo
- ice_sched_clear_port
- ice_sched_cleanup_all
- ice_sched_add_elems
- ice_sched_add_nodes_to_layer
- ice_sched_get_qgrp_layer
- ice_sched_get_vsi_layer
- ice_rm_dflt_leaf_node
- ice_sched_rm_dflt_nodes
- ice_sched_init_port
- ice_sched_query_res_alloc
- ice_sched_find_node_in_subtree
- ice_sched_get_free_qparent
- ice_sched_get_vsi_node
- ice_sched_calc_vsi_child_nodes
- ice_sched_add_vsi_child_nodes
- ice_sched_calc_vsi_support_nodes
- ice_sched_add_vsi_support_nodes
- ice_sched_add_vsi_to_topo
- ice_sched_update_vsi_child_nodes
- ice_sched_cfg_vsi
- ice_sched_rm_agg_vsi_info
- ice_sched_is_leaf_node_present
- ice_sched_rm_vsi_cfg
- ice_rm_vsi_lan_cfg
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_sched.h"

/**
 * ice_sched_add_root_node - insert the Tx scheduler root node in SW DB
 * @pi: port information structure
 * @info: scheduler element information from firmware
 *
 * This function inserts the root node of the scheduling tree topology
 * to the SW DB.
 */
static enum ice_status
ice_sched_add_root_node(struct ice_port_info *pi,
			struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *root;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
	if (!root)
		return ICE_ERR_NO_MEMORY;

	/* coverity[suspicious_sizeof] */
	root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
				      sizeof(*root), GFP_KERNEL);
	if (!root->children) {
		devm_kfree(ice_hw_to_dev(hw), root);
		return ICE_ERR_NO_MEMORY;
	}

	memcpy(&root->info, info, sizeof(*info));
	pi->root = root;
	return 0;
}

/**
 * ice_sched_find_node_by_teid - find the node in SW DB with the given TEID
 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
 * @teid: node TEID to search
 *
 * This function searches for a node matching the TEID in the scheduling tree
 * from the SW DB. The search is recursive and is restricted by the number of
 * layers it has searched through; stopping at the max supported layer.
 *
 * This function needs to be called when holding the port_info->sched_lock.
 */
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
	u16 i;

	/* The TEID is same as that of the start_node */
	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
		return start_node;

	/* The node has no children or is at the max layer */
	if (!start_node->num_children ||
	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
		return NULL;

	/* Check if TEID matches to any of the children nodes */
	for (i = 0; i < start_node->num_children; i++)
		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
			return start_node->children[i];

	/* Search within each child's sub-tree */
	for (i = 0; i < start_node->num_children; i++) {
		struct ice_sched_node *tmp;

		tmp = ice_sched_find_node_by_teid(start_node->children[i],
						  teid);
		if (tmp)
			return tmp;
	}

	return NULL;
}
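
/*
 * Usage sketch (illustrative only, not part of the driver): with
 * pi->sched_lock held, any node can be located from the port root by its
 * TEID, for example:
 *
 *	struct ice_sched_node *node;
 *
 *	node = ice_sched_find_node_by_teid(pi->root, teid);
 *	if (node)
 *		layer = node->tx_sched_layer;
 */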

/**
 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
 * @hw: pointer to the HW struct
 * @cmd_opc: cmd opcode
 * @elems_req: number of elements to request
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_resp: returns total number of elements in response
 * @cd: pointer to command details structure or NULL
 *
 * This function sends a scheduling elements cmd (cmd_opc)
 */
static enum ice_status
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
			    u16 elems_req, void *buf, u16 buf_size,
			    u16 *elems_resp, struct ice_sq_cd *cd)
{
	struct ice_aqc_sched_elem_cmd *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.sched_elem_cmd;
	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
	cmd->num_elem_req = cpu_to_le16(elems_req);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && elems_resp)
		*elems_resp = le16_to_cpu(cmd->num_elem_resp);

	return status;
}

/**
 * ice_aq_query_sched_elems - query scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to query
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements returned
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduling elements from HW
 */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
			 struct ice_aqc_get_elem *buf, u16 buf_size,
			 u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_sched_add_node - insert the Tx scheduler node in SW DB
 * @pi: port information structure
 * @layer: scheduler layer of the node
 * @info: scheduler element information from firmware
 *
 * This function inserts a scheduler node to the SW DB.
 */
enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
		   struct ice_aqc_txsched_elem_data *info)
{
	struct ice_sched_node *parent;
	struct ice_aqc_get_elem elem;
	struct ice_sched_node *node;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi)
		return ICE_ERR_PARAM;

	hw = pi->hw;

	/* A valid parent node should be there */
	parent = ice_sched_find_node_by_teid(pi->root,
					     le32_to_cpu(info->parent_teid));
	if (!parent) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Parent Node not found for parent_teid=0x%x\n",
			  le32_to_cpu(info->parent_teid));
		return ICE_ERR_PARAM;
	}

	/* query the current node information from FW before adding it
	 * to the SW DB
	 */
	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
	if (status)
		return status;

	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
	if (!node)
		return ICE_ERR_NO_MEMORY;
	if (hw->max_children[layer]) {
		/* coverity[suspicious_sizeof] */
		node->children = devm_kcalloc(ice_hw_to_dev(hw),
					      hw->max_children[layer],
					      sizeof(*node), GFP_KERNEL);
		if (!node->children) {
			devm_kfree(ice_hw_to_dev(hw), node);
			return ICE_ERR_NO_MEMORY;
		}
	}

	node->in_use = true;
	node->parent = parent;
	node->tx_sched_layer = layer;
	parent->children[parent->num_children++] = node;
	memcpy(&node->info, &elem.generic[0], sizeof(node->info));
	return 0;
}

/**
 * ice_aq_delete_sched_elems - delete scheduler elements
 * @hw: pointer to the HW struct
 * @grps_req: number of groups to delete
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_del: returns total number of elements deleted
 * @cd: pointer to command details structure or NULL
 *
 * Delete scheduling elements from HW
 */
static enum ice_status
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
			  struct ice_aqc_delete_elem *buf, u16 buf_size,
			  u16 *grps_del, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_del, cd);
}

/**
 * ice_sched_remove_elems - remove nodes from HW
 * @hw: pointer to the HW struct
 * @parent: pointer to the parent node
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be deleted
 *
 * This function removes nodes from HW
 */
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
		       u16 num_nodes, u32 *node_teids)
{
	struct ice_aqc_delete_elem *buf;
	u16 i, num_groups_removed = 0;
	enum ice_status status;
	u16 buf_size;

	buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);

	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
					   &num_groups_removed, NULL);
	if (status || num_groups_removed != 1)
		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
			  hw->adminq.sq_last_status);

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_get_first_node - get the first node of the given layer
 * @pi: port information structure
 * @parent: pointer to the base node of the subtree
 * @layer: layer number
 *
 * This function retrieves the first node of the given layer from the subtree
 */
static struct ice_sched_node *
ice_sched_get_first_node(struct ice_port_info *pi,
			 struct ice_sched_node *parent, u8 layer)
{
	return pi->sib_head[parent->tc_num][layer];
}

/**
 * ice_sched_get_tc_node - get pointer to TC node
 * @pi: port information structure
 * @tc: TC number
 *
 * This function returns the TC node pointer
 */
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
{
	u8 i;

	if (!pi || !pi->root)
		return NULL;
	for (i = 0; i < pi->root->num_children; i++)
		if (pi->root->children[i]->tc_num == tc)
			return pi->root->children[i];
	return NULL;
}

/**
 * ice_free_sched_node - free a Tx scheduler node from SW DB
 * @pi: port information structure
 * @node: pointer to the ice_sched_node struct
 *
 * This function frees up a node from SW DB as well as from HW
 *
 * This function needs to be called with the port_info->sched_lock held
 */
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
{
	struct ice_sched_node *parent;
	struct ice_hw *hw = pi->hw;
	u8 i, j;

	/* Free the children before freeing up the parent node. The parent
	 * array is updated below by that function.
	 */
	while (node->num_children)
		ice_free_sched_node(pi, node->children[0]);

	/* Leaf, TC and root nodes can't be deleted by SW */
	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);

		ice_sched_remove_elems(hw, node->parent, 1, &teid);
	}
	parent = node->parent;
	/* root has no parent */
	if (parent) {
		struct ice_sched_node *p;

		/* update the parent's child list */
		for (i = 0; i < parent->num_children; i++)
			if (parent->children[i] == node) {
				for (j = i + 1; j < parent->num_children; j++)
					parent->children[j - 1] =
						parent->children[j];
				parent->num_children--;
				break;
			}

		/* unlink the node from its sibling list */
		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
		while (p) {
			if (p->sibling == node) {
				p->sibling = node->sibling;
				break;
			}
			p = p->sibling;
		}

		/* update the sibling head if head is getting removed */
		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
			pi->sib_head[node->tc_num][node->tx_sched_layer] =
				node->sibling;
	}

	/* leaf nodes have no children */
	if (node->children)
		devm_kfree(ice_hw_to_dev(hw), node->children);
	devm_kfree(ice_hw_to_dev(hw), node);
}

/**
 * ice_aq_get_dflt_topo - gets the default scheduler topology
 * @hw: pointer to the HW struct
 * @lport: logical port number
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @num_branches: returns total number of queue to port branches
 * @cd: pointer to command details structure or NULL
 *
 * Get the default scheduler topology from HW
 */
static enum ice_status
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
		     u8 *num_branches, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_topo *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_topo;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
	cmd->port_num = lport;
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status && num_branches)
		*num_branches = cmd->num_branches;

	return status;
}

/**
 * ice_aq_add_sched_elems - adds scheduling elements
 * @hw: pointer to the HW struct
 * @grps_req: the number of groups that are requested to be added
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @grps_added: returns total number of groups added
 * @cd: pointer to command details structure or NULL
 *
 * Add scheduling elements to HW
 */
static enum ice_status
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
		       struct ice_aqc_add_elem *buf, u16 buf_size,
		       u16 *grps_added, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
					   grps_req, (void *)buf, buf_size,
					   grps_added, cd);
}

/**
 * ice_aq_suspend_sched_elems - suspend scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to suspend
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements suspended
 * @cd: pointer to command details structure or NULL
 *
 * Suspend scheduling elements in HW
 */
static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
			   struct ice_aqc_suspend_resume_elem *buf,
			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_resume_sched_elems - resume scheduler elements
 * @hw: pointer to the HW struct
 * @elems_req: number of elements to resume
 * @buf: pointer to buffer
 * @buf_size: buffer size in bytes
 * @elems_ret: returns total number of elements resumed
 * @cd: pointer to command details structure or NULL
 *
 * Resume scheduling elements in HW
 */
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
			  struct ice_aqc_suspend_resume_elem *buf,
			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
					   elems_req, (void *)buf, buf_size,
					   elems_ret, cd);
}

/**
 * ice_aq_query_sched_res - query scheduler resource
 * @hw: pointer to the HW struct
 * @buf_size: buffer size in bytes
 * @buf: pointer to buffer
 * @cd: pointer to command details structure or NULL
 *
 * Query scheduler resource allocation from HW
 */
static enum ice_status
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
		       struct ice_aqc_query_txsched_res_resp *buf,
		       struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
 * @hw: pointer to the HW struct
 * @num_nodes: number of nodes
 * @node_teids: array of node teids to be suspended or resumed
 * @suspend: true means suspend / false means resume
 *
 * This function suspends or resumes HW nodes
 */
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
			       bool suspend)
{
	struct ice_aqc_suspend_resume_elem *buf;
	u16 i, buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf) * num_nodes;
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	for (i = 0; i < num_nodes; i++)
		buf->teid[i] = cpu_to_le32(node_teids[i]);

	if (suspend)
		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
						    buf_size, &num_elem_ret,
						    NULL);
	else
		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
						   buf_size, &num_elem_ret,
						   NULL);
	if (status || num_elem_ret != num_nodes)
		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}
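
/*
 * Usage sketch (illustrative): ice_sched_cfg_vsi() below disables a VSI node
 * by suspending it with a single-element TEID array:
 *
 *	u32 teid = le32_to_cpu(vsi_node->info.node_teid);
 *
 *	status = ice_sched_suspend_resume_elems(hw, 1, &teid, true);
 *
 * Passing suspend == false issues the resume command instead.
 */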

/**
 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @tc: TC number
 * @new_numqs: number of queues
 */
static enum ice_status
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
	struct ice_vsi_ctx *vsi_ctx;
	struct ice_q_ctx *q_ctx;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;

	/* allocate LAN queue contexts */
	if (!vsi_ctx->lan_q_ctx[tc]) {
		vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
						      new_numqs,
						      sizeof(*q_ctx),
						      GFP_KERNEL);
		if (!vsi_ctx->lan_q_ctx[tc])
			return ICE_ERR_NO_MEMORY;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
		return 0;
	}

	/* num queues are increased, update the queue contexts */
	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];

		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
				     sizeof(*q_ctx), GFP_KERNEL);
		if (!q_ctx)
			return ICE_ERR_NO_MEMORY;
		memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
		       prev_num * sizeof(*q_ctx));
		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
		vsi_ctx->lan_q_ctx[tc] = q_ctx;
		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
	}
	return 0;
}

/**
 * ice_sched_clear_agg - clears the aggregator related information
 * @hw: pointer to the hardware structure
 *
 * This function removes the aggregator list and frees up aggregator related
 * memory previously allocated.
 */
void ice_sched_clear_agg(struct ice_hw *hw)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry) {
			list_del(&agg_vsi_info->list_entry);
			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
		}
		list_del(&agg_info->list_entry);
		devm_kfree(ice_hw_to_dev(hw), agg_info);
	}
}

/**
 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
 * @pi: port information structure
 *
 * This function removes all the nodes from HW as well as from SW DB.
 */
static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
{
	if (!pi)
		return;
	if (pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}
}

/**
 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
 * @pi: port information structure
 *
 * Cleanup scheduling elements from SW DB
 */
void ice_sched_clear_port(struct ice_port_info *pi)
{
	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return;

	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
	mutex_lock(&pi->sched_lock);
	ice_sched_clear_tx_topo(pi);
	mutex_unlock(&pi->sched_lock);
	mutex_destroy(&pi->sched_lock);
}

/**
 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
 * @hw: pointer to the HW struct
 *
 * Cleanup scheduling elements from SW DB for all the ports
 */
void ice_sched_cleanup_all(struct ice_hw *hw)
{
	if (!hw)
		return;

	if (hw->layer_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
		hw->layer_info = NULL;
	}

	if (hw->port_info)
		ice_sched_clear_port(hw->port_info);

	hw->num_tx_sched_layers = 0;
	hw->num_tx_sched_phys_layers = 0;
	hw->flattened_layers = 0;
	hw->max_cgds = 0;
}

/**
 * ice_sched_add_elems - add nodes to HW and SW DB
 * @pi: port information structure
 * @tc_node: pointer to the branch node
 * @parent: pointer to the parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes
 * @num_nodes_added: pointer to num nodes added
 * @first_node_teid: if new nodes are added then return the TEID of first node
 *
 * This function adds nodes to HW as well as to SW DB for a given layer
 */
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
		    u16 *num_nodes_added, u32 *first_node_teid)
{
	struct ice_sched_node *prev, *new_node;
	struct ice_aqc_add_elem *buf;
	u16 i, num_groups_added = 0;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	size_t buf_size;
	u32 teid;

	buf_size = struct_size(buf, generic, num_nodes - 1);
	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	buf->hdr.parent_teid = parent->info.node_teid;
	buf->hdr.num_elems = cpu_to_le16(num_nodes);
	for (i = 0; i < num_nodes; i++) {
		buf->generic[i].parent_teid = parent->info.node_teid;
		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
		buf->generic[i].data.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->generic[i].data.generic = 0;
		buf->generic[i].data.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->generic[i].data.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->generic[i].data.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}

	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
					&num_groups_added, NULL);
	if (status || num_groups_added != 1) {
		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
			  hw->adminq.sq_last_status);
		devm_kfree(ice_hw_to_dev(hw), buf);
		return ICE_ERR_CFG;
	}

	*num_nodes_added = num_nodes;
	/* add nodes to the SW DB */
	for (i = 0; i < num_nodes; i++) {
		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
		if (status) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "add nodes in SW DB failed status =%d\n",
				  status);
			break;
		}

		teid = le32_to_cpu(buf->generic[i].node_teid);
		new_node = ice_sched_find_node_by_teid(parent, teid);
		if (!new_node) {
			ice_debug(hw, ICE_DBG_SCHED,
				  "Node is missing for teid =%d\n", teid);
			break;
		}

		new_node->sibling = NULL;
		new_node->tc_num = tc_node->tc_num;

		/* add it to previous node sibling pointer */
		/* Note: siblings are not linked across branches */
		prev = ice_sched_get_first_node(pi, tc_node, layer);
		if (prev && prev != new_node) {
			while (prev->sibling)
				prev = prev->sibling;
			prev->sibling = new_node;
		}

		/* initialize the sibling head */
		if (!pi->sib_head[tc_node->tc_num][layer])
			pi->sib_head[tc_node->tc_num][layer] = new_node;

		if (i == 0)
			*first_node_teid = teid;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_add_nodes_to_layer - add nodes to a given layer
 * @pi: port information structure
 * @tc_node: pointer to TC node
 * @parent: pointer to parent node
 * @layer: layer number to add nodes
 * @num_nodes: number of nodes to be added
 * @first_node_teid: pointer to the first node TEID
 * @num_nodes_added: pointer to number of nodes added
 *
 * This function adds nodes to a given layer.
 */
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	u32 *first_teid_ptr = first_node_teid;
	u16 new_num_nodes, max_child_nodes;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 num_added = 0;
	u32 temp;

	*num_nodes_added = 0;

	if (!num_nodes)
		return status;

	if (!parent || layer < hw->sw_entry_point_layer)
		return ICE_ERR_PARAM;

	/* max children per node per layer */
	max_child_nodes = hw->max_children[parent->tx_sched_layer];

	/* current num of children + requested nodes exceed max children? */
	if ((parent->num_children + num_nodes) > max_child_nodes) {
		/* Fail if the parent is a TC node */
		if (parent == tc_node)
			return ICE_ERR_CFG;

		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
			/* this recursion is intentional, and wouldn't
			 * go more than 2 calls
			 */
			status = ice_sched_add_nodes_to_layer(pi, tc_node,
							      parent, layer,
							      new_num_nodes,
							      first_node_teid,
							      &num_added);
			if (status)
				return status;

			*num_nodes_added += num_added;
		}
		/* Don't modify the first node TEID memory if the first node
		 * was added already in the above call. Instead send some
		 * temp memory for all other recursive calls.
		 */
		if (num_added)
			first_teid_ptr = &temp;

		new_num_nodes = num_nodes - num_added;

		/* This parent is full, try the next sibling */
		parent = parent->sibling;

		/* this recursion is intentional, for 1024 queues
		 * per VSI, it goes max of 16 iterations.
		 * 1024 / 8 = 128 layer 8 nodes
		 * 128 / 8 = 16 (add 8 nodes per iteration)
		 */
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      layer, new_num_nodes,
						      first_teid_ptr,
						      &num_added);
		*num_nodes_added += num_added;
		return status;
	}

	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
				     num_nodes_added, first_node_teid);
	return status;
}

/**
 * ice_sched_get_qgrp_layer - get the current queue group layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current queue group layer number
 */
static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
{
	/* It's always total layers - 1, the array is 0 relative so -2 */
	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
}

/**
 * ice_sched_get_vsi_layer - get the current VSI layer number
 * @hw: pointer to the HW struct
 *
 * This function returns the current VSI layer number
 */
static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
	/* Num Layers       VSI layer
	 *     9               6
	 *     7               4
	 *     5 or less       sw_entry_point_layer
	 */
	/* calculate the VSI layer based on number of layers. */
	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;

		if (layer > hw->sw_entry_point_layer)
			return layer;
	}
	return hw->sw_entry_point_layer;
}
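
/*
 * Worked example (illustrative; assumes ICE_VSI_LAYER_OFFSET == 3 and
 * ICE_QGRP_LAYER_OFFSET == 2 as defined in ice_sched.h): with the default
 * 9-layer topology, the VSI layer is 9 - 3 = 6 and the queue group layer is
 * 9 - 2 = 7, so queue group nodes sit one layer below the VSI nodes and
 * directly above the queue (leaf) nodes.
 */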

/**
 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
 * @pi: port information structure
 *
 * This function removes the leaf node that was created by the FW
 * during initialization
 */
static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	node = pi->root;
	while (node) {
		if (!node->num_children)
			break;
		node = node->children[0];
	}
	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
		u32 teid = le32_to_cpu(node->info.node_teid);
		enum ice_status status;

		/* remove the default leaf node */
		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
		if (!status)
			ice_free_sched_node(pi, node);
	}
}

/**
 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
 * @pi: port information structure
 *
 * This function frees all the nodes except root and TC that were created by
 * the FW during initialization
 */
static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
	struct ice_sched_node *node;

	ice_rm_dflt_leaf_node(pi);

	/* remove the default nodes except TC and root nodes */
	node = pi->root;
	while (node) {
		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
			ice_free_sched_node(pi, node);
			break;
		}

		if (!node->num_children)
			break;
		node = node->children[0];
	}
}

/**
 * ice_sched_init_port - initialize scheduler by querying information from FW
 * @pi: port info structure for the tree to cleanup
 *
 * This function is the initial call to find the total number of Tx scheduler
 * resources, default topology created by firmware and storing the information
 * in SW DB.
 */
enum ice_status ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	enum ice_status status;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	u8 i, j;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
			  num_branches);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = le16_to_cpu(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
			  num_elems);
		status = ICE_ERR_PARAM;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the queue group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = le16_to_cpu(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	mutex_init(&pi->sched_lock);

err_init_port:
	if (status && pi->root) {
		ice_free_sched_node(pi, pi->root);
		pi->root = NULL;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
 * @hw: pointer to the HW struct
 *
 * query FW for allocated scheduler resources and store in HW struct
 */
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
{
	struct ice_aqc_query_txsched_res_resp *buf;
	enum ice_status status = 0;
	__le16 max_sibl;
	u16 i;

	if (hw->layer_info)
		return status;

	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
	if (status)
		goto sched_query_out;

	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
	hw->num_tx_sched_phys_layers =
		le16_to_cpu(buf->sched_props.phys_levels);
	hw->flattened_layers = buf->sched_props.flattening_bitmap;
	hw->max_cgds = buf->sched_props.max_pf_cgds;

	/* max sibling group size of current layer refers to the max children
	 * of the below layer node.
	 * layer 1 node max children will be layer 2 max sibling group size
	 * layer 2 node max children will be layer 3 max sibling group size
	 * and so on. This array will be populated from root (index 0) to
	 * qgroup layer 7. Leaf node has no children.
	 */
	for (i = 0; i < hw->num_tx_sched_layers; i++) {
		max_sibl = buf->layer_props[i].max_sibl_grp_sz;
		hw->max_children[i] = le16_to_cpu(max_sibl);
	}

	hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
				      (hw->num_tx_sched_layers *
				       sizeof(*hw->layer_info)),
				      GFP_KERNEL);
	if (!hw->layer_info) {
		status = ICE_ERR_NO_MEMORY;
		goto sched_query_out;
	}

sched_query_out:
	devm_kfree(ice_hw_to_dev(hw), buf);
	return status;
}

/**
 * ice_sched_find_node_in_subtree - find node in part of base node subtree
 * @hw: pointer to the HW struct
 * @base: pointer to the base node
 * @node: pointer to the node to search
 *
 * This function checks whether a given node is part of the base node
 * subtree or not
 */
static bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
			       struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < base->num_children; i++) {
		struct ice_sched_node *child = base->children[i];

		if (node == child)
			return true;

		if (child->tx_sched_layer > node->tx_sched_layer)
			return false;

		/* this recursion is intentional, and wouldn't
		 * go more than 8 calls
		 */
		if (ice_sched_find_node_in_subtree(hw, child, node))
			return true;
	}
	return false;
}

/**
 * ice_sched_get_free_qparent - get a free LAN or RDMA queue group node
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: branch number
 * @owner: LAN or RDMA
 *
 * This function retrieves a free LAN or RDMA queue group node
 */
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			   u8 owner)
{
	struct ice_sched_node *vsi_node, *qgrp_node = NULL;
	struct ice_vsi_ctx *vsi_ctx;
	u16 max_children;
	u8 qgrp_layer;

	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
	max_children = pi->hw->max_children[qgrp_layer];

	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		return NULL;
	vsi_node = vsi_ctx->sched.vsi_node[tc];
	/* validate invalid VSI ID */
	if (!vsi_node)
		goto lan_q_exit;

	/* get the first queue group node from VSI sub-tree */
	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
	while (qgrp_node) {
		/* make sure the qgroup node is part of the VSI subtree */
		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
			if (qgrp_node->num_children < max_children &&
			    qgrp_node->owner == owner)
				break;
		qgrp_node = qgrp_node->sibling;
	}

lan_q_exit:
	return qgrp_node;
}

/**
 * ice_sched_get_vsi_node - get VSI node of the given VSI handle
 * @hw: pointer to the HW struct
 * @tc_node: pointer to the TC node
 * @vsi_handle: software VSI handle
 *
 * This function retrieves a VSI node for a given VSI handle from a given
 * TC branch
 */
static struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
		       u16 vsi_handle)
{
	struct ice_sched_node *node;
	u8 vsi_layer;

	vsi_layer = ice_sched_get_vsi_layer(hw);
	node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);

	/* Check whether it already exists */
	while (node) {
		if (node->vsi_handle == vsi_handle)
			return node;
		node = node->sibling;
	}

	return node;
}

/**
 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
 * @hw: pointer to the HW struct
 * @num_qs: number of queues
 * @num_nodes: num nodes array
 *
 * This function calculates the number of VSI child nodes based on the
 * number of queues.
 */
static void
ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
{
	u16 num = num_qs;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);

	/* calculate num nodes from queue group to VSI layer */
	for (i = qgl; i > vsil; i--) {
		/* round to the next integer if there is a remainder */
		num = DIV_ROUND_UP(num, hw->max_children[i]);

		/* need at least one node */
		num_nodes[i] = num ? num : 1;
	}
}
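
/*
 * Worked example (illustrative; assumes 8 children per queue group node):
 * for num_qs = 100, the queue group layer needs DIV_ROUND_UP(100, 8) = 13
 * nodes. If additional layers sat between the queue group and VSI layers,
 * the division would repeat per layer (13 -> 2 -> 1, and so on); with the
 * default topology the queue group layer is the only layer computed here.
 */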

/**
 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to the TC node
 * @num_nodes: pointer to the num nodes that needs to be added per layer
 * @owner: node owner (LAN or RDMA)
 *
 * This function adds the VSI child nodes to tree. It gets called for
 * LAN and RDMA separately.
 */
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
			      struct ice_sched_node *tc_node, u16 *num_nodes,
			      u8 owner)
{
	struct ice_sched_node *parent, *node;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, qgl, vsil;

	qgl = ice_sched_get_qgrp_layer(hw);
	vsil = ice_sched_get_vsi_layer(hw);
	parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
	for (i = vsil + 1; i <= qgl; i++) {
		if (!parent)
			return ICE_ERR_CFG;

		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
						      num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added) {
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
			node = parent;
			while (node) {
				node->owner = owner;
				node = node->sibling;
			}
		} else {
			parent = parent->children[0];
		}
	}

	return 0;
}

/**
 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
 * @hw: pointer to the HW struct
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function calculates the number of supported nodes needed to add this
 * VSI into Tx tree including the VSI, parent and intermediate nodes in below
 * layers
 */
static void
ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
				 struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *node;
	u8 vsil;
	int i;

	vsil = ice_sched_get_vsi_layer(hw);
	for (i = vsil; i >= hw->sw_entry_point_layer; i--)
		/* Add intermediate nodes if TC has no children and
		 * need at least one node for VSI
		 */
		if (!tc_node->num_children || i == vsil) {
			num_nodes[i]++;
		} else {
			/* If intermediate nodes are reached max children
			 * then add a new one.
			 */
			node = ice_sched_get_first_node(hw->port_info, tc_node,
							(u8)i);
			/* scan all the siblings */
			while (node) {
				if (node->num_children < hw->max_children[i])
					break;
				node = node->sibling;
			}

			/* tree has one intermediate node to add this new VSI.
			 * So no need to calculate supported nodes for below
			 * layers.
			 */
			if (node)
				break;
			/* all the nodes are full, allocate a new one */
			num_nodes[i]++;
		}
}

/**
 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
 * This function adds the VSI supported nodes into Tx tree including the
 * VSI, its parent and intermediate nodes in below layers
 */
static enum ice_status
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
				struct ice_sched_node *tc_node, u16 *num_nodes)
{
	struct ice_sched_node *parent = tc_node;
	enum ice_status status;
	u32 first_node_teid;
	u16 num_added = 0;
	u8 i, vsil;

	if (!pi)
		return ICE_ERR_PARAM;

	vsil = ice_sched_get_vsi_layer(pi->hw);
	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
						      i, num_nodes[i],
						      &first_node_teid,
						      &num_added);
		if (status || num_nodes[i] != num_added)
			return ICE_ERR_CFG;

		/* The newly added node can be a new parent for the next
		 * layer nodes
		 */
		if (num_added)
			parent = ice_sched_find_node_by_teid(tc_node,
							     first_node_teid);
		else
			parent = parent->children[0];

		if (!parent)
			return ICE_ERR_CFG;

		if (i == vsil)
			parent->vsi_handle = vsi_handle;
	}

	return 0;
}

/**
 * ice_sched_add_vsi_to_topo - add a new VSI into the Tx topology tree
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 *
 * This function adds a new VSI into scheduler tree
 */
static enum ice_status
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *tc_node;
	struct ice_hw *hw = pi->hw;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;

	/* calculate number of supported nodes needed for this VSI */
	ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);

	/* add VSI supported nodes to TC subtree */
	return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
					       num_nodes);
}

/**
 * ice_sched_update_vsi_child_nodes - update VSI child nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @new_numqs: new number of max queues
 * @owner: owner of this subtree
 *
 * This function updates the VSI child nodes based on the number of queues
 */
static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
				 u8 tc, u16 new_numqs, u8 owner)
{
	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
	struct ice_sched_node *vsi_node;
	struct ice_sched_node *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;
	u16 prev_numqs;

	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_CFG;

	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
	if (!vsi_node)
		return ICE_ERR_CFG;

	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;

	prev_numqs = vsi_ctx->sched.max_lanq[tc];
	/* num queues are not changed or less than the previous number */
	if (new_numqs <= prev_numqs)
		return status;
	status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
	if (status)
		return status;

	if (new_numqs)
		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
	/* Keep the max number of queue configuration all the time. Update the
	 * tree only if number of queues > previous number of queues. This may
	 * leave some extra nodes in the tree if number of queues < previous
	 * number but that wouldn't harm anything. Removing those extra nodes
	 * may complicate the code if those nodes are part of SRL or
	 * individually rate limited.
	 */
	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
					       new_num_nodes, owner);
	if (status)
		return status;
	vsi_ctx->sched.max_lanq[tc] = new_numqs;

	return 0;
}

/**
 * ice_sched_cfg_vsi - configure the new/existing VSI
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @maxqs: max number of queues
 * @owner: LAN or RDMA
 * @enable: TC enabled or disabled
 *
 * This function adds/updates VSI nodes based on the number of queues. If TC is
 * enabled and VSI is in suspended state then resume the VSI back. If TC is
 * disabled then suspend the VSI if it is not already.
 */
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
		  u8 owner, bool enable)
{
	struct ice_sched_node *vsi_node, *tc_node;
	struct ice_vsi_ctx *vsi_ctx;
	enum ice_status status = 0;
	struct ice_hw *hw = pi->hw;

	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
	tc_node = ice_sched_get_tc_node(pi, tc);
	if (!tc_node)
		return ICE_ERR_PARAM;
	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi_ctx)
		return ICE_ERR_PARAM;
	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);

	/* suspend the VSI if TC is not enabled */
	if (!enable) {
		if (vsi_node && vsi_node->in_use) {
			u32 teid = le32_to_cpu(vsi_node->info.node_teid);

			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
								true);
			if (!status)
				vsi_node->in_use = false;
		}
		return status;
	}

	/* TC is enabled, if it is a new VSI then add it to the tree */
	if (!vsi_node) {
		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
		if (status)
			return status;

		vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
		if (!vsi_node)
			return ICE_ERR_CFG;

		vsi_ctx->sched.vsi_node[tc] = vsi_node;
		vsi_node->in_use = true;
		/* invalidate the max queues whenever VSI gets added first time
		 * into the scheduler tree (boot or after reset). We need to
		 * recreate the child nodes all the time in these cases.
		 */
		vsi_ctx->sched.max_lanq[tc] = 0;
	}

	/* update the VSI child nodes */
	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
						  owner);
	if (status)
		return status;

	/* TC is enabled, resume the VSI if it is in the suspend state */
	if (!vsi_node->in_use) {
		u32 teid = le32_to_cpu(vsi_node->info.node_teid);

		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
		if (!status)
			vsi_node->in_use = true;
	}

	return status;
}
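
/*
 * Usage sketch (illustrative, assuming the caller holds pi->sched_lock and
 * the VSI context already exists): enable 16 LAN queues for a VSI on TC 0:
 *
 *	status = ice_sched_cfg_vsi(pi, vsi_handle, 0, 16,
 *				   ICE_SCHED_NODE_OWNER_LAN, true);
 *
 * Passing enable == false suspends the VSI node instead of removing it; the
 * node is only freed later via ice_sched_rm_vsi_cfg().
 */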

/**
 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function removes single aggregator VSI info entry from
 * aggregator list.
 */
static void
ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
	struct ice_sched_agg_info *agg_info;
	struct ice_sched_agg_info *atmp;

	list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
				 list_entry) {
		struct ice_sched_agg_vsi_info *agg_vsi_info;
		struct ice_sched_agg_vsi_info *vtmp;

		list_for_each_entry_safe(agg_vsi_info, vtmp,
					 &agg_info->agg_vsi_list, list_entry)
			if (agg_vsi_info->vsi_handle == vsi_handle) {
				list_del(&agg_vsi_info->list_entry);
				devm_kfree(ice_hw_to_dev(pi->hw),
					   agg_vsi_info);
				return;
			}
	}
}

/**
 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
 * @node: pointer to the sub-tree node
 *
 * This function checks for a leaf node presence in a given sub-tree.
 */
static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
{
	u8 i;

	for (i = 0; i < node->num_children; i++)
		if (ice_sched_is_leaf_node_present(node->children[i]))
			return true;
	/* check for a leaf node */
	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
}

/**
 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @owner: LAN or RDMA
 *
 * This function removes the VSI and its LAN or RDMA children nodes from the
 * scheduler tree.
 */
static enum ice_status
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
	enum ice_status status = ICE_ERR_PARAM;
	struct ice_vsi_ctx *vsi_ctx;
	u8 i;

	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return status;
	mutex_lock(&pi->sched_lock);
	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
	if (!vsi_ctx)
		goto exit_sched_rm_vsi_cfg;

	ice_for_each_traffic_class(i) {
		struct ice_sched_node *vsi_node, *tc_node;
		u8 j = 0;

		tc_node = ice_sched_get_tc_node(pi, i);
		if (!tc_node)
			continue;

		vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
		if (!vsi_node)
			continue;

		if (ice_sched_is_leaf_node_present(vsi_node)) {
			ice_debug(pi->hw, ICE_DBG_SCHED,
				  "VSI has leaf nodes in TC %d\n", i);
			status = ICE_ERR_IN_USE;
			goto exit_sched_rm_vsi_cfg;
		}
		while (j < vsi_node->num_children) {
			if (vsi_node->children[j]->owner == owner) {
				ice_free_sched_node(pi, vsi_node->children[j]);

				/* reset the counter again since the num
				 * children will be updated after node removal
				 */
				j = 0;
			} else {
				j++;
			}
		}
		/* remove the VSI if it has no children */
		if (!vsi_node->num_children) {
			ice_free_sched_node(pi, vsi_node);
			vsi_ctx->sched.vsi_node[i] = NULL;

			/* clean up aggregator related VSI info if any */
			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
		}
		if (owner == ICE_SCHED_NODE_OWNER_LAN)
			vsi_ctx->sched.max_lanq[i] = 0;
	}
	status = 0;

exit_sched_rm_vsi_cfg:
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its LAN children nodes from scheduler tree
 * for all TCs
 */
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
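
/*
 * Usage sketch (illustrative): tearing down a VSI's LAN scheduling state
 * after its Tx queues have been freed:
 *
 *	status = ice_rm_vsi_lan_cfg(pi, vsi_handle);
 *
 * This fails with ICE_ERR_IN_USE if any leaf (queue) node is still attached
 * under the VSI, so the queues must be removed first.
 */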