This source file includes the following definitions:
- mlx5_cq_tasklet_cb
- mlx5_add_cq_to_tasklet
- mlx5_core_create_cq
- mlx5_core_destroy_cq
- mlx5_core_query_cq
- mlx5_core_modify_cq
- mlx5_core_modify_cq_moderation
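
The sketch below is not part of the file; it is a minimal, hypothetical illustration of how a consumer of this API might tune interrupt moderation on an existing CQ via mlx5_core_modify_cq_moderation(). The helper name and the chosen period/count values are assumptions, and the cq_period unit is understood (not confirmed here) to be microseconds on mlx5 hardware.

/* Hypothetical caller (not in this file): ask the device to raise a
 * completion event after at most ~16 usec or 64 completions, whichever
 * limit is reached first. Assumes valid dev and cq pointers.
 */
static int example_tune_cq_moderation(struct mlx5_core_dev *dev,
                                      struct mlx5_core_cq *cq)
{
        return mlx5_core_modify_cq_moderation(dev, cq, 16, 64);
}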
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
#include "lib/eq.h"

#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)

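/*
 * Tasklet handler for a completion EQ: splice the CQs queued by
 * mlx5_add_cq_to_tasklet() onto a private list, run each CQ's completion
 * handler and drop the reference taken when the CQ was queued. Work is
 * bounded by TASKLET_MAX_TIME_JIFFIES; anything left over is handled by
 * rescheduling the tasklet.
 */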
void mlx5_cq_tasklet_cb(unsigned long data)
{
        unsigned long flags;
        unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
        struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
        struct mlx5_core_cq *mcq;
        struct mlx5_core_cq *temp;

        spin_lock_irqsave(&ctx->lock, flags);
        list_splice_tail_init(&ctx->list, &ctx->process_list);
        spin_unlock_irqrestore(&ctx->lock, flags);

        list_for_each_entry_safe(mcq, temp, &ctx->process_list,
                                 tasklet_ctx.list) {
                list_del_init(&mcq->tasklet_ctx.list);
                mcq->tasklet_ctx.comp(mcq, NULL);
                mlx5_cq_put(mcq);
                if (time_after(jiffies, end))
                        break;
        }

        if (!list_empty(&ctx->process_list))
                tasklet_schedule(&ctx->task);
}

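/*
 * Default cq->comp callback installed by mlx5_core_create_cq(): defer the
 * completion to the EQ's tasklet instead of handling it directly in the
 * EQ interrupt path.
 */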
static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
                                   struct mlx5_eqe *eqe)
{
        unsigned long flags;
        struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;

        spin_lock_irqsave(&tasklet_ctx->lock, flags);
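        /* Queue the CQ only if it is not already pending; the reference
         * taken here is released by mlx5_cq_tasklet_cb() once the
         * completion handler has run.
         */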
        if (list_empty_careful(&cq->tasklet_ctx.list)) {
                mlx5_cq_hold(cq);
                list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
        }
        spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}

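/*
 * Create a CQ with the CREATE_CQ command and attach it to its completion
 * EQ and to the async EQ. If anything fails after the firmware object has
 * been created, the CQ is torn down again via DESTROY_CQ.
 */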
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                        u32 *in, int inlen, u32 *out, int outlen)
{
        int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
        u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
        u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
        struct mlx5_eq_comp *eq;
        int err;

        eq = mlx5_eqn2comp_eq(dev, eqn);
        if (IS_ERR(eq))
                return PTR_ERR(eq);

        memset(out, 0, outlen);
        MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
        err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
        if (err)
                return err;

        cq->cqn = MLX5_GET(create_cq_out, out, cqn);
        cq->cons_index = 0;
        cq->arm_sn = 0;
        cq->eq = eq;
        cq->uid = MLX5_GET(create_cq_in, in, uid);
        refcount_set(&cq->refcount, 1);
        init_completion(&cq->free);
        if (!cq->comp)
                cq->comp = mlx5_add_cq_to_tasklet;

        cq->tasklet_ctx.priv = &eq->tasklet_ctx;
        INIT_LIST_HEAD(&cq->tasklet_ctx.list);

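        /* Register the CQ with its completion EQ so completion events are
         * delivered to it.
         */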
        err = mlx5_eq_add_cq(&eq->core, cq);
        if (err)
                goto err_cmd;

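        /* Register the CQ with the async EQ as well so asynchronous (error)
         * events can be routed to it.
         */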
        err = mlx5_eq_add_cq(mlx5_get_async_eq(dev), cq);
        if (err)
                goto err_cq_add;

        cq->pid = current->pid;
        err = mlx5_debug_cq_add(dev, cq);
        if (err)
                mlx5_core_dbg(dev, "failed adding CQ 0x%x to debug file system\n",
                              cq->cqn);

        cq->uar = dev->priv.uar;

        return 0;

err_cq_add:
        mlx5_eq_del_cq(&eq->core, cq);
err_cmd:
        memset(din, 0, sizeof(din));
        memset(dout, 0, sizeof(dout));
        MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
        MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
        MLX5_SET(destroy_cq_in, din, uid, cq->uid);
        mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
        return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);

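/*
 * Destroy a CQ: detach it from both EQs, issue DESTROY_CQ, wait for any
 * in-flight interrupt handling on the CQ's IRQ to finish, then drop the
 * initial reference and wait until all remaining references are released.
 */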
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
        int err;

        mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
        mlx5_eq_del_cq(&cq->eq->core, cq);

        MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
        MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
        MLX5_SET(destroy_cq_in, in, uid, cq->uid);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (err)
                return err;

        synchronize_irq(cq->irqn);

        mlx5_debug_cq_remove(dev, cq);
        mlx5_cq_put(cq);
        wait_for_completion(&cq->free);

        return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);

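/* Read the CQ context back from firmware with the QUERY_CQ command. */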
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                       u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};

        MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
        MLX5_SET(query_cq_in, in, cqn, cq->cqn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);

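/*
 * Execute MODIFY_CQ on a caller-built command buffer; only the opcode and
 * the CQ uid are filled in here.
 */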
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                        u32 *in, int inlen)
{
        u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};

        MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
        MLX5_SET(modify_cq_in, in, uid, cq->uid);
        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);

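/*
 * Convenience wrapper around mlx5_core_modify_cq() that updates only the
 * event moderation fields (cq_period and cq_max_count) of the CQ context.
 */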
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
                                   struct mlx5_core_cq *cq,
                                   u16 cq_period,
                                   u16 cq_max_count)
{
        u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
        void *cqc;

        MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
        cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, cq_period, cq_period);
        MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
        MLX5_SET(modify_cq_in, in,
                 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
                 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);

        return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);