This source file includes the following definitions:
- qede_ptp_adjfreq
- qede_ptp_adjtime
- qede_ptp_gettime
- qede_ptp_settime
- qede_ptp_ancillary_feature_enable
- qede_ptp_task
- qede_ptp_read_cc
- qede_ptp_cfg_filters
- qede_ptp_hw_ts
- qede_ptp_get_ts_info
- qede_ptp_disable
- qede_ptp_init
- qede_ptp_enable
- qede_ptp_tx_ts
- qede_ptp_rx_ts
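
For context, qede_ptp_hw_ts below services the standard SIOCSHWTSTAMP ioctl. The following is a minimal user-space sketch of such a request; the interface name "eth0" and the chosen filter are illustrative assumptions, not part of this driver:

#include <linux/net_tstamp.h>   /* struct hwtstamp_config, HWTSTAMP_* */
#include <linux/sockios.h>      /* SIOCSHWTSTAMP */
#include <net/if.h>             /* struct ifreq, IFNAMSIZ */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int main(void)
{
        struct hwtstamp_config cfg = { 0 };
        struct ifreq ifr = { 0 };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        cfg.tx_type = HWTSTAMP_TX_ON;                   /* timestamp transmitted PTP packets */
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;   /* timestamp received PTPv2 event messages */

        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* hypothetical interface name */
        ifr.ifr_data = (char *)&cfg;

        /* The driver may adjust the request; the granted rx_filter is copied back */
        if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
                perror("SIOCSHWTSTAMP");
        else
                printf("granted rx_filter = %d\n", cfg.rx_filter);

        return 0;
}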
#include "qede_ptp.h"
#define QEDE_PTP_TX_TIMEOUT (2 * HZ)

struct qede_ptp {
        const struct qed_eth_ptp_ops *ops;
        struct ptp_clock_info clock_info;
        struct cyclecounter cc;
        struct timecounter tc;
        struct ptp_clock *clock;
        struct work_struct work;
        unsigned long ptp_tx_start;
        struct qede_dev *edev;
        struct sk_buff *tx_skb;

        /* ptp spinlock serializes HW access from the PHC callbacks and the
         * Rx/Tx timestamping paths.
         */
        spinlock_t lock;
        bool hw_ts_ioctl_called;
        u16 tx_type;
        u16 rx_filter;
};

/* Adjust the PHC frequency by the given parts-per-billion delta */
static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
{
        struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
        struct qede_dev *edev = ptp->edev;
        int rc;

        __qede_lock(edev);
        if (edev->state == QEDE_STATE_OPEN) {
                spin_lock_bh(&ptp->lock);
                rc = ptp->ops->adjfreq(edev->cdev, ppb);
                spin_unlock_bh(&ptp->lock);
        } else {
                DP_ERR(edev, "PTP adjfreq called while interface is down\n");
                rc = -EFAULT;
        }
        __qede_unlock(edev);

        return rc;
}

static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
        struct qede_dev *edev;
        struct qede_ptp *ptp;

        ptp = container_of(info, struct qede_ptp, clock_info);
        edev = ptp->edev;

        DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
                   delta);

        spin_lock_bh(&ptp->lock);
        timecounter_adjtime(&ptp->tc, delta);
        spin_unlock_bh(&ptp->lock);

        return 0;
}

static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
        struct qede_dev *edev;
        struct qede_ptp *ptp;
        u64 ns;

        ptp = container_of(info, struct qede_ptp, clock_info);
        edev = ptp->edev;

        spin_lock_bh(&ptp->lock);
        ns = timecounter_read(&ptp->tc);
        spin_unlock_bh(&ptp->lock);

        DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);

        *ts = ns_to_timespec64(ns);

        return 0;
}

static int qede_ptp_settime(struct ptp_clock_info *info,
                            const struct timespec64 *ts)
{
        struct qede_dev *edev;
        struct qede_ptp *ptp;
        u64 ns;

        ptp = container_of(info, struct qede_ptp, clock_info);
        edev = ptp->edev;

        ns = timespec64_to_ns(ts);

        DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);

        /* Re-initialize the timecounter with the new absolute time */
        spin_lock_bh(&ptp->lock);
        timecounter_init(&ptp->tc, &ptp->cc, ns);
        spin_unlock_bh(&ptp->lock);

        return 0;
}

/* Enable/disable ancillary PHC features; none are supported by this device */
static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
                                             struct ptp_clock_request *rq,
                                             int on)
{
        struct qede_dev *edev;
        struct qede_ptp *ptp;

        ptp = container_of(info, struct qede_ptp, clock_info);
        edev = ptp->edev;

        DP_ERR(edev, "PHC ancillary features are not supported\n");

        return -ENOTSUPP;
}

static void qede_ptp_task(struct work_struct *work)
{
        struct skb_shared_hwtstamps shhwtstamps;
        struct qede_dev *edev;
        struct qede_ptp *ptp;
        u64 timestamp, ns;
        bool timedout;
        int rc;

        ptp = container_of(work, struct qede_ptp, work);
        edev = ptp->edev;
        timedout = time_is_before_jiffies(ptp->ptp_tx_start +
                                          QEDE_PTP_TX_TIMEOUT);

        /* Read the latched Tx timestamp from the device */
        spin_lock_bh(&ptp->lock);
        rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
        spin_unlock_bh(&ptp->lock);
        if (rc) {
                if (unlikely(timedout)) {
                        DP_INFO(edev, "Tx timestamp is not recorded\n");
                        dev_kfree_skb_any(ptp->tx_skb);
                        ptp->tx_skb = NULL;
                        clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
                                         &edev->flags);
                        edev->ptp_skip_txts++;
                } else {
                        /* Timestamp not ready yet - reschedule and retry */
                        schedule_work(&ptp->work);
                }
                return;
        }

        ns = timecounter_cyc2time(&ptp->tc, timestamp);
        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ns_to_ktime(ns);
        skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
        dev_kfree_skb_any(ptp->tx_skb);
        ptp->tx_skb = NULL;
        clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);

        DP_VERBOSE(edev, QED_MSG_DEBUG,
                   "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
                   timestamp, ns);
}

/* Read the PHC; called via the timecounter helpers with ptp->lock held */
static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
{
        struct qede_dev *edev;
        struct qede_ptp *ptp;
        u64 phc_cycles;
        int rc;

        ptp = container_of(cc, struct qede_ptp, cc);
        edev = ptp->edev;
        rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
        if (rc)
                WARN_ONCE(1, "PHC read err %d\n", rc);

        DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);

        return phc_cycles;
}

static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
        enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
        enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
        struct qede_ptp *ptp = edev->ptp;

        if (!ptp)
                return -EIO;

        if (!ptp->hw_ts_ioctl_called) {
                DP_INFO(edev, "TS IOCTL not called\n");
                return 0;
        }

        switch (ptp->tx_type) {
        case HWTSTAMP_TX_ON:
                set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
                tx_type = QED_PTP_HWTSTAMP_TX_ON;
                break;

        case HWTSTAMP_TX_OFF:
                clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
                tx_type = QED_PTP_HWTSTAMP_TX_OFF;
                break;

        case HWTSTAMP_TX_ONESTEP_SYNC:
                DP_ERR(edev, "One-step timestamping is not supported\n");
                return -ERANGE;
        }

        spin_lock_bh(&ptp->lock);
        switch (ptp->rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                rx_filter = QED_PTP_FILTER_NONE;
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
        case HWTSTAMP_FILTER_NTP_ALL:
                ptp->rx_filter = HWTSTAMP_FILTER_NONE;
                rx_filter = QED_PTP_FILTER_ALL;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
                rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;

                rx_filter = QED_PTP_FILTER_V1_L4_GEN;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
                rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;

                rx_filter = QED_PTP_FILTER_V2_L4_GEN;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
                rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;

                rx_filter = QED_PTP_FILTER_V2_L2_GEN;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                rx_filter = QED_PTP_FILTER_V2_EVENT;
                break;
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

                rx_filter = QED_PTP_FILTER_V2_GEN;
                break;
        }

        ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);

        spin_unlock_bh(&ptp->lock);

        return 0;
}

int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
{
        struct hwtstamp_config config;
        struct qede_ptp *ptp;
        int rc;

        ptp = edev->ptp;
        if (!ptp)
                return -EIO;

        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;

        DP_VERBOSE(edev, QED_MSG_DEBUG,
                   "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
                   config.tx_type, config.rx_filter);

        if (config.flags) {
                DP_ERR(edev, "config.flags is reserved for future use\n");
                return -EINVAL;
        }

        ptp->hw_ts_ioctl_called = 1;
        ptp->tx_type = config.tx_type;
        ptp->rx_filter = config.rx_filter;

        rc = qede_ptp_cfg_filters(edev);
        if (rc)
                return rc;

        config.rx_filter = ptp->rx_filter;

        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(config)) ? -EFAULT : 0;
}

int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
        struct qede_ptp *ptp = edev->ptp;

        if (!ptp) {
                info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                        SOF_TIMESTAMPING_RX_SOFTWARE |
                                        SOF_TIMESTAMPING_SOFTWARE;
                info->phc_index = -1;

                return 0;
        }

        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
                                SOF_TIMESTAMPING_SOFTWARE |
                                SOF_TIMESTAMPING_TX_HARDWARE |
                                SOF_TIMESTAMPING_RX_HARDWARE |
                                SOF_TIMESTAMPING_RAW_HARDWARE;

        if (ptp->clock)
                info->phc_index = ptp_clock_index(ptp->clock);
        else
                info->phc_index = -1;

        info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
                           BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
                           BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
                           BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);

        info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

        return 0;
}

void qede_ptp_disable(struct qede_dev *edev)
{
        struct qede_ptp *ptp;

        ptp = edev->ptp;
        if (!ptp)
                return;

        if (ptp->clock) {
                ptp_clock_unregister(ptp->clock);
                ptp->clock = NULL;
        }

        /* Cancel the PTP work queue. Done after the Tx queues are drained
         * to prevent additional scheduling of the work.
         */
        cancel_work_sync(&ptp->work);
        if (ptp->tx_skb) {
                dev_kfree_skb_any(ptp->tx_skb);
                ptp->tx_skb = NULL;
        }

        /* Disable PTP in HW */
        spin_lock_bh(&ptp->lock);
        ptp->ops->disable(edev->cdev);
        spin_unlock_bh(&ptp->lock);

        kfree(ptp);
        edev->ptp = NULL;
}

static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
{
        struct qede_ptp *ptp;
        int rc;

        ptp = edev->ptp;
        if (!ptp)
                return -EINVAL;

        spin_lock_init(&ptp->lock);

        /* Configure PTP in HW */
        rc = ptp->ops->enable(edev->cdev);
        if (rc) {
                DP_INFO(edev, "PTP HW enable failed\n");
                return rc;
        }

        /* Init work queue for Tx timestamping */
        INIT_WORK(&ptp->work, qede_ptp_task);

        /* Init the cyclecounter and timecounter only on the first load;
         * re-initializing them on every load would break a running PTP
         * application across an unload/load cycle (e.g. an MTU change).
         */
        if (init_tc) {
                memset(&ptp->cc, 0, sizeof(ptp->cc));
                ptp->cc.read = qede_ptp_read_cc;
                ptp->cc.mask = CYCLECOUNTER_MASK(64);
                ptp->cc.shift = 0;
                ptp->cc.mult = 1;

                timecounter_init(&ptp->tc, &ptp->cc,
                                 ktime_to_ns(ktime_get_real()));
        }

        return rc;
}

int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
{
        struct qede_ptp *ptp;
        int rc;

        ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
        if (!ptp) {
                DP_INFO(edev, "Failed to allocate struct for PTP\n");
                return -ENOMEM;
        }

        ptp->edev = edev;
        ptp->ops = edev->ops->ptp;
        if (!ptp->ops) {
                DP_INFO(edev, "PTP enable failed\n");
                rc = -EIO;
                goto err1;
        }

        edev->ptp = ptp;

        rc = qede_ptp_init(edev, init_tc);
        if (rc)
                goto err1;

        qede_ptp_cfg_filters(edev);

        /* Fill the ptp_clock_info struct and register the PTP clock */
        ptp->clock_info.owner = THIS_MODULE;
        snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
        ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
        ptp->clock_info.n_alarm = 0;
        ptp->clock_info.n_ext_ts = 0;
        ptp->clock_info.n_per_out = 0;
        ptp->clock_info.pps = 0;
        ptp->clock_info.adjfreq = qede_ptp_adjfreq;
        ptp->clock_info.adjtime = qede_ptp_adjtime;
        ptp->clock_info.gettime64 = qede_ptp_gettime;
        ptp->clock_info.settime64 = qede_ptp_settime;
        ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;

        ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
        if (IS_ERR(ptp->clock)) {
                DP_ERR(edev, "PTP clock registration failed\n");
                qede_ptp_disable(edev);
                rc = -EINVAL;
                goto err2;
        }

        return 0;

err1:
        kfree(ptp);
err2:
        edev->ptp = NULL;

        return rc;
}

void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
        struct qede_ptp *ptp;

        ptp = edev->ptp;
        if (!ptp)
                return;

        if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
                                  &edev->flags)) {
                DP_ERR(edev, "Timestamping in progress\n");
                edev->ptp_skip_txts++;
                return;
        }

        if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
                DP_ERR(edev,
                       "Tx timestamping was not enabled, this packet will not be timestamped\n");
                clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
                edev->ptp_skip_txts++;
        } else if (unlikely(ptp->tx_skb)) {
                DP_ERR(edev,
                       "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
                clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
                edev->ptp_skip_txts++;
        } else {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

                /* Hold a reference on the skb and poll for its Tx timestamp */
                ptp->tx_skb = skb_get(skb);
                ptp->ptp_tx_start = jiffies;
                schedule_work(&ptp->work);
        }
}

void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
        struct qede_ptp *ptp;
        u64 timestamp, ns;
        int rc;

        ptp = edev->ptp;
        if (!ptp)
                return;

        spin_lock_bh(&ptp->lock);
        rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
        if (rc) {
                spin_unlock_bh(&ptp->lock);
                DP_INFO(edev, "Invalid Rx timestamp\n");
                return;
        }

        ns = timecounter_cyc2time(&ptp->tc, timestamp);
        spin_unlock_bh(&ptp->lock);
        skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
        DP_VERBOSE(edev, QED_MSG_DEBUG,
                   "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
                   timestamp, ns);
}