This source file includes the following definitions:
- qed_ptcdev_to_resc
- qed_ptp_res_lock
- qed_ptp_res_unlock
- qed_ptp_hw_read_rx_ts
- qed_ptp_hw_read_tx_ts
- qed_ptp_hw_read_cc
- qed_ptp_hw_cfg_filters
- qed_ptp_hw_adjfreq
- qed_ptp_hw_enable
- qed_ptp_hw_disable

#include <linux/types.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"

/* Field positions within NIG_REG_TSGEN_DRIFT_CNTR_CONF */
#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT	0
#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT		28
#define QED_DRIFT_CNTR_DIRECTION_SHIFT		31

/* "Timestamp valid" flag in the Rx/Tx buffer SEQID registers; writing it
 * back releases the buffer for the next timestamp.
 */
#define QED_TIMESTAMP_MASK			BIT(16)

/* Written to the NIG LLH PTP param-mask registers when Rx/Tx filtering is
 * enabled (see qed_ptp_hw_cfg_filters()).
 */
#define QED_PTP_UCAST_PARAM_MASK		0x70F

static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
{
	switch (MFW_PORT(p_hwfn)) {
	case 0:
		return QED_RESC_LOCK_PTP_PORT0;
	case 1:
		return QED_RESC_LOCK_PTP_PORT1;
	case 2:
		return QED_RESC_LOCK_PTP_PORT2;
	case 3:
		return QED_RESC_LOCK_PTP_PORT3;
	default:
		return QED_RESC_LOCK_RESC_INVALID;
	}
}

static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_lock_params params;
	enum qed_resc_lock resource;
	int rc;

	resource = qed_ptcdev_to_resc(p_hwfn);
	if (resource == QED_RESC_LOCK_RESC_INVALID)
		return -EINVAL;

	qed_mcp_resc_lock_default_init(&params, NULL, resource, true);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
	if (rc && rc != -EINVAL) {
		return rc;
	} else if (rc == -EINVAL) {
		/* MFW doesn't support resource locking; the first PF on the
		 * port is considered the lock owner.
		 */
		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
			return 0;

		DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
		return -EBUSY;
	} else if (!rc && !params.b_granted) {
		DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
		return -EBUSY;
	}

	return rc;
}

static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_unlock_params params;
	enum qed_resc_lock resource;
	int rc;

	resource = qed_ptcdev_to_resc(p_hwfn);
	if (resource == QED_RESC_LOCK_RESC_INVALID)
		return -EINVAL;

	qed_mcp_resc_lock_default_init(NULL, &params, resource, true);

	rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
	if (rc == -EINVAL) {
		/* MFW doesn't support locking; the first PF on the port owns
		 * the lock implicitly.
		 */
		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
			rc = 0;
		} else {
			DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
			return -EINVAL;
		}
	} else if (rc) {
		DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
	}

	return rc;
}
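
/* Usage note: in this file the port lock is taken once in qed_ptp_hw_enable()
 * and released again in qed_ptp_hw_disable(), so the owning PF holds it for
 * the whole time PTP is enabled.  The sketch below shows how a hypothetical
 * caller could bracket a one-off update of the shared NIG PTP registers with
 * the same helpers; the function name is illustrative and not part of the
 * driver.
 */
#if 0	/* illustrative sketch only */
static int example_ptp_locked_update(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	int rc;

	rc = qed_ptp_res_lock(p_hwfn, p_ptt);
	if (rc)
		return rc;

	/* ... program shared PTP registers here ... */

	return qed_ptp_res_unlock(p_hwfn, p_ptt);
}
#endif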

/* Read the Rx timestamp latched by the hardware, if one is pending */
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
		return -EINVAL;
	}

	val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Flush the timestamp so a new one can be latched */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}

/* Read the Tx timestamp latched by the hardware, if one is pending */
static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 val;

	*timestamp = 0;
	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
	if (!(val & QED_TIMESTAMP_MASK)) {
		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
			   "Invalid Tx timestamp, buf_seqid = %08x\n", val);
		return -EINVAL;
	}

	val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
	*timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
	*timestamp <<= 32;
	*timestamp |= val;

	/* Flush the timestamp so a new one can be latched */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}

/* Read the PHC free-running cycle counter */
static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 temp = 0;

	temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
	*phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
	*phc_cycles <<= 32;
	*phc_cycles |= temp;

	return 0;
}
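
/* Consumer sketch (hypothetical, not part of this file): an upper-layer
 * driver would typically expose this cycle counter to the kernel
 * timecounter code by wrapping read_cc in a struct cyclecounter read
 * callback.  The struct and field names below are illustrative assumptions.
 */
#if 0	/* illustrative sketch only */
#include <linux/timecounter.h>

struct example_ptp {
	struct qed_dev *cdev;
	const struct qed_eth_ptp_ops *ops;
	struct cyclecounter cc;
	struct timecounter tc;
};

static u64 example_ptp_read_cc(const struct cyclecounter *cc)
{
	struct example_ptp *ptp = container_of(cc, struct example_ptp, cc);
	u64 phc_cycles = 0;

	ptp->ops->read_cc(ptp->cdev, &phc_cycles);
	return phc_cycles;
}
#endif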

/* Program the NIG rule/param masks so that only the requested class of PTP
 * packets is detected and timestamped.
 */
static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
				  enum qed_ptp_filter_type rx_type,
				  enum qed_ptp_hwtstamp_tx_type tx_type)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 rule_mask, enable_cfg = 0x0;

	switch (rx_type) {
	case QED_PTP_FILTER_NONE:
		enable_cfg = 0x0;
		rule_mask = 0x3FFF;
		break;
	case QED_PTP_FILTER_ALL:
		enable_cfg = 0x7;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V1_L4_EVENT:
		enable_cfg = 0x3;
		rule_mask = 0x3FFA;
		break;
	case QED_PTP_FILTER_V1_L4_GEN:
		enable_cfg = 0x3;
		rule_mask = 0x3FFE;
		break;
	case QED_PTP_FILTER_V2_L4_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3FAA;
		break;
	case QED_PTP_FILTER_V2_L4_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3FEE;
		break;
	case QED_PTP_FILTER_V2_L2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CFF;
		break;
	case QED_PTP_FILTER_V2_L2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EFF;
		break;
	case QED_PTP_FILTER_V2_EVENT:
		enable_cfg = 0x5;
		rule_mask = 0x3CAA;
		break;
	case QED_PTP_FILTER_V2_GEN:
		enable_cfg = 0x5;
		rule_mask = 0x3EEE;
		break;
	default:
		DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
		return -EINVAL;
	}

	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK,
	       QED_PTP_UCAST_PARAM_MASK);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);

	if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
	} else {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK,
		       QED_PTP_UCAST_PARAM_MASK);
		qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
	}

	/* Clear any stale Rx timestamp left from before the filter change */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);

	return 0;
}

/* Adjust the PHC frequency.  The requested rate is given in parts per
 * billion (ppb); the hardware applies it as a drift triplet written to
 * NIG_REG_TSGEN_DRIFT_CNTR_CONF:
 *   - adjustment value: how many nanoseconds to add or subtract,
 *   - drift period: how often the adjustment is applied,
 *   - direction: whether the value is added or subtracted.
 * The search below picks the (value, period) pair that best approximates
 * the requested ppb.
 */
static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
{
	s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
	u32 drift_ctr_cfg = 0, drift_state;
	int drift_dir = 1;

	if (ppb < 0) {
		ppb = -ppb;
		drift_dir = 0;
	}

	if (ppb > 1) {
		s64 best_dif = ppb, best_approx_dev = 1;

		/* The adjustment value can be in the range 1..7 ns; for each
		 * candidate value find the period that comes closest to the
		 * requested ppb and keep the best combination.
		 */
		for (val = 7; val > 0; val--) {
			period = div_s64(val * 1000000000, ppb);
			period -= 8;
			period >>= 4;
			if (period < 1)
				period = 1;
			if (period > 0xFFFFFFE)
				period = 0xFFFFFFE;

			/* Error of rounding the period down vs. up; the
			 * actual adjustment interval is period * 16 + 8 ns.
			 */
			approx_dev = period * 16 + 8;
			dif = ppb * approx_dev - val * 1000000000;
			dif2 = dif + 16 * ppb;

			if (dif < 0)
				dif = -dif;
			if (dif2 < 0)
				dif2 = -dif2;

			/* Use the rounded-up period if its relative error is
			 * smaller (cross-multiplied to avoid division).
			 */
			if (dif * (approx_dev + 16) > dif2 * approx_dev) {
				period++;
				approx_dev += 16;
				dif = dif2;
			}

			/* Keep the candidate with the smallest relative error */
			if (best_dif * approx_dev > dif * best_approx_dev) {
				best_dif = dif;
				best_val = val;
				best_period = period;
				best_approx_dev = approx_dev;
			}
		}
	} else if (ppb == 1) {
		/* ppb == 1 is handled separately; the cross-multiplied error
		 * comparisons above could overflow an s64 for such a small
		 * rate.  Adjust by 4 ns roughly every 4 seconds instead.
		 */
		best_val = 4;
		best_period = 0xee6b27f;
	} else {
		/* ppb == 0: no drift adjustment */
		best_val = 0;
		best_period = 0xFFFFFFF;
	}

	drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
			(((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
			(((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);

	drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
	if (drift_state & 1) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
		       drift_ctr_cfg);
	} else {
		DP_INFO(p_hwfn, "Drift counter is not reset\n");
		return -EINVAL;
	}

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	return 0;
}
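
/* Worked example (illustrative): for a requested rate of 100 ppb the search
 * above settles on best_val = 7 and best_period = 4375000, i.e. 7 ns are
 * applied once every 4375000 * 16 + 8 = 70000008 ns, which is ~99.99999 ppb.
 * The stand-alone user-space helper below (not driver code) recovers the
 * effective rate from a (val, period) pair so the approximation error can
 * be checked.
 */
#if 0	/* user-space sketch, not built with the driver */
#include <stdint.h>
#include <stdio.h>

static double drift_triplet_to_ppb(int64_t val, int64_t period)
{
	/* val nanoseconds are applied once every (period * 16 + 8) ns */
	return (double)val * 1e9 / (double)(period * 16 + 8);
}

int main(void)
{
	printf("%.6f ppb\n", drift_triplet_to_ppb(7, 4375000));
	return 0;
}
#endif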

static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
		return -EBUSY;
	}

	p_hwfn->p_ptp_ptt = p_ptt;

	rc = qed_ptp_res_lock(p_hwfn, p_ptt);
	if (rc) {
		DP_INFO(p_hwfn,
			"Couldn't acquire the resource lock, skip ptp enable for this PF\n");
		qed_ptt_release(p_hwfn, p_ptt);
		p_hwfn->p_ptp_ptt = NULL;
		return rc;
	}

	/* Reset the PTP detection rules; the actual filters are programmed
	 * later through the .cfg_filters callback.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);

	/* Pause the free-running counter while it is being cleared */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
	if (QED_IS_AH(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);

	/* Resume the free-running counter */
	if (QED_IS_BB_B0(p_hwfn->cdev))
		qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
	if (QED_IS_AH(p_hwfn->cdev)) {
		qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
		qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
	}

	/* Disable drift adjustment */
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);

	/* Clear any previously latched Rx/Tx timestamps */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
	       QED_TIMESTAMP_MASK);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);

	return 0;
}

static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;

	qed_ptp_res_unlock(p_hwfn, p_ptt);

	/* Reset the PTP detection rules */
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);

	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);

	/* Disable PTP detection on both Rx and Tx */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);

	qed_ptt_release(p_hwfn, p_ptt);
	p_hwfn->p_ptp_ptt = NULL;

	return 0;
}

const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
	.cfg_filters = qed_ptp_hw_cfg_filters,
	.read_rx_ts = qed_ptp_hw_read_rx_ts,
	.read_tx_ts = qed_ptp_hw_read_tx_ts,
	.read_cc = qed_ptp_hw_read_cc,
	.adjfreq = qed_ptp_hw_adjfreq,
	.disable = qed_ptp_hw_disable,
	.enable = qed_ptp_hw_enable,
};
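
/* Consumer sketch (hypothetical, not part of this file): an Ethernet driver
 * reaches these callbacks through the ops table exported above, roughly as
 * follows.  QED_PTP_HWTSTAMP_TX_ON is assumed to be the enable counterpart
 * of the QED_PTP_HWTSTAMP_TX_OFF value used in qed_ptp_hw_cfg_filters().
 */
#if 0	/* illustrative sketch only */
static void example_ptp_start(struct qed_dev *cdev)
{
	const struct qed_eth_ptp_ops *ops = &qed_ptp_ops_pass;
	u64 ts;

	ops->enable(cdev);
	ops->cfg_filters(cdev, QED_PTP_FILTER_ALL, QED_PTP_HWTSTAMP_TX_ON);

	if (!ops->read_tx_ts(cdev, &ts))
		pr_info("Tx timestamp: %llu\n", ts);
}
#endif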