This source file includes the following definitions:
- read_internal_timer
- mlx5_update_clock_info_page
- mlx5_pps_out
- mlx5_timestamp_overflow
- mlx5_ptp_settime
- mlx5_ptp_gettimex
- mlx5_ptp_adjtime
- mlx5_ptp_adjfreq
- mlx5_extts_configure
- mlx5_perout_configure
- mlx5_pps_configure
- mlx5_ptp_enable
- mlx5_ptp_verify
- mlx5_init_pin_config
- mlx5_get_pps_caps
- mlx5_pps_event
- mlx5_init_clock
- mlx5_cleanup_clock
#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
#include "en.h"
#include "clock.h"

enum {
        MLX5_CYCLES_SHIFT       = 23
};
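
/*
 * The cyclecounter converts cycles to nanoseconds as (cycles * mult) >> shift.
 * A 23-bit shift keeps the multiplier large enough for fine adjfreq steps
 * while cycles * mult still fits in 64 bits.  Illustrative numbers only (the
 * real frequency comes from the device_frequency_khz capability): for an
 * assumed 156250 kHz counter, clocksource_khz2mult(156250, 23) yields
 * mult ~= 53687091, i.e. 53687091 >> 23 ~= 6.4 ns per cycle.
 */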

enum {
        MLX5_PIN_MODE_IN                = 0x0,
        MLX5_PIN_MODE_OUT               = 0x1,
};

enum {
        MLX5_OUT_PATTERN_PULSE          = 0x0,
        MLX5_OUT_PATTERN_PERIODIC       = 0x1,
};

enum {
        MLX5_EVENT_MODE_DISABLE         = 0x0,
        MLX5_EVENT_MODE_REPETETIVE      = 0x1,
        MLX5_EVENT_MODE_ONCE_TILL_ARM   = 0x2,
};

enum {
        MLX5_MTPPS_FS_ENABLE                    = BIT(0x0),
        MLX5_MTPPS_FS_PATTERN                   = BIT(0x2),
        MLX5_MTPPS_FS_PIN_MODE                  = BIT(0x3),
        MLX5_MTPPS_FS_TIME_STAMP                = BIT(0x4),
        MLX5_MTPPS_FS_OUT_PULSE_DURATION        = BIT(0x5),
        MLX5_MTPPS_FS_ENH_OUT_PER_ADJ           = BIT(0x7),
};

static u64 read_internal_timer(const struct cyclecounter *cc)
{
        struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                  clock);

        return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
}

static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
        struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
        struct mlx5_clock *clock = &mdev->clock;
        u32 sign;

        if (!clock_info)
                return;

        sign = smp_load_acquire(&clock_info->sign);
        smp_store_mb(clock_info->sign,
                     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

        clock_info->cycles = clock->tc.cycle_last;
        clock_info->mult = clock->cycles.mult;
        clock_info->nsec = clock->tc.nsec;
        clock_info->frac = clock->tc.frac;

        smp_store_release(&clock_info->sign,
                          sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
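
/*
 * The clock-info page above is mapped read-only into userspace by the mlx5
 * RDMA provider so applications can expand raw HW timestamps without a
 * syscall.  The sign field behaves like a seqlock: the kernel sets
 * MLX5_IB_CLOCK_INFO_KERNEL_UPDATING while writing and advances the count
 * by two per update, so a consistent read looks roughly like this sketch
 * (illustrative only, not the provider's actual code):
 *
 *      do {
 *              sign = smp_load_acquire(&ci->sign);
 *              if (sign & MLX5_IB_CLOCK_INFO_KERNEL_UPDATING)
 *                      continue;
 *              ... copy cycles, mult, nsec, frac ...
 *      } while (smp_load_acquire(&ci->sign) != sign);
 */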

static void mlx5_pps_out(struct work_struct *work)
{
        struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
                                                 out_work);
        struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
                                                pps_info);
        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                  clock);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        unsigned long flags;
        int i;

        for (i = 0; i < clock->ptp_info.n_pins; i++) {
                u64 tstart;

                write_seqlock_irqsave(&clock->lock, flags);
                tstart = clock->pps_info.start[i];
                clock->pps_info.start[i] = 0;
                write_sequnlock_irqrestore(&clock->lock, flags);
                if (!tstart)
                        continue;

                MLX5_SET(mtpps_reg, in, pin, i);
                MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
                MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
                mlx5_set_mtpps(mdev, in, sizeof(in));
        }
}
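
/*
 * mlx5_pps_out() runs from a workqueue rather than directly from the PPS
 * event notifier: the notifier runs in atomic context, while programming
 * MTPPS issues a firmware command that can sleep, hence the handoff via
 * schedule_work() in mlx5_pps_event() below.
 */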

static void mlx5_timestamp_overflow(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
                                                overflow_work);
        unsigned long flags;

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_read(&clock->tc);
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);
        schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
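
/*
 * Periodically folding elapsed cycles into the timecounter keeps
 * timecounter_cyc2time() correct across wraps of the 41-bit hardware
 * counter; overflow_period is sized in mlx5_init_clock() so this work
 * runs at least twice per wrap.
 */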

static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
                            const struct timespec64 *ts)
{
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                ptp_info);
        u64 ns = timespec64_to_ns(ts);
        unsigned long flags;

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_init(&clock->tc, &clock->cycles, ns);
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);

        return 0;
}

static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
                             struct ptp_system_timestamp *sts)
{
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                ptp_info);
        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                  clock);
        unsigned long flags;
        u64 cycles, ns;

        write_seqlock_irqsave(&clock->lock, flags);
        cycles = mlx5_read_internal_timer(mdev, sts);
        ns = timecounter_cyc2time(&clock->tc, cycles);
        write_sequnlock_irqrestore(&clock->lock, flags);

        *ts = ns_to_timespec64(ns);

        return 0;
}

static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                ptp_info);
        unsigned long flags;

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_adjtime(&clock->tc, delta);
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);

        return 0;
}

static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
        u64 adj;
        u32 diff;
        unsigned long flags;
        int neg_adj = 0;
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                ptp_info);

        if (delta < 0) {
                neg_adj = 1;
                delta = -delta;
        }

        adj = clock->nominal_c_mult;
        adj *= delta;
        diff = div_u64(adj, 1000000000ULL);

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_read(&clock->tc);
        clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
                                       clock->nominal_c_mult + diff;
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);

        return 0;
}
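
/*
 * Worked example for the adjfreq scaling (same illustrative multiplier as
 * the note at MLX5_CYCLES_SHIFT): with nominal_c_mult ~= 53687091, a request
 * of delta = +100 ppb gives diff = 53687091 * 100 / 1e9 ~= 5, so cycles.mult
 * becomes nominal_c_mult + 5 and the synthesized clock runs ~93 ppb fast
 * (integer truncation absorbs the remainder).
 */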

static int mlx5_extts_configure(struct ptp_clock_info *ptp,
                                struct ptp_clock_request *rq,
                                int on)
{
        struct mlx5_clock *clock =
                        container_of(ptp, struct mlx5_clock, ptp_info);
        struct mlx5_core_dev *mdev =
                        container_of(clock, struct mlx5_core_dev, clock);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        u32 field_select = 0;
        u8 pin_mode = 0;
        u8 pattern = 0;
        int pin = -1;
        int err = 0;

        if (!MLX5_PPS_CAP(mdev))
                return -EOPNOTSUPP;

        /* Reject requests with unsupported flags */
        if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
                                PTP_RISING_EDGE |
                                PTP_FALLING_EDGE |
                                PTP_STRICT_FLAGS))
                return -EOPNOTSUPP;

        /* Reject requests to enable time stamping on both edges */
        if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
            (rq->extts.flags & PTP_ENABLE_FEATURE) &&
            (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
                return -EOPNOTSUPP;

        if (rq->extts.index >= clock->ptp_info.n_pins)
                return -EINVAL;

        if (on) {
                pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
                if (pin < 0)
                        return -EBUSY;
                pin_mode = MLX5_PIN_MODE_IN;
                pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
                field_select = MLX5_MTPPS_FS_PIN_MODE |
                               MLX5_MTPPS_FS_PATTERN |
                               MLX5_MTPPS_FS_ENABLE;
        } else {
                pin = rq->extts.index;
                field_select = MLX5_MTPPS_FS_ENABLE;
        }

        MLX5_SET(mtpps_reg, in, pin, pin);
        MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
        MLX5_SET(mtpps_reg, in, pattern, pattern);
        MLX5_SET(mtpps_reg, in, enable, on);
        MLX5_SET(mtpps_reg, in, field_select, field_select);

        err = mlx5_set_mtpps(mdev, in, sizeof(in));
        if (err)
                return err;

        return mlx5_set_mtppse(mdev, pin, 0,
                               MLX5_EVENT_MODE_REPETETIVE & on);
}
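
/*
 * Note on the final call: MTPPS configures the pin itself, while MTPPSE
 * arms event reporting so each detected edge raises a PPS event on the EQ
 * (handled by mlx5_pps_event() below).  "MLX5_EVENT_MODE_REPETETIVE & on"
 * evaluates to REPETETIVE (0x1) when enabling and DISABLE (0x0) otherwise.
 */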

static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                                 struct ptp_clock_request *rq,
                                 int on)
{
        struct mlx5_clock *clock =
                        container_of(ptp, struct mlx5_clock, ptp_info);
        struct mlx5_core_dev *mdev =
                        container_of(clock, struct mlx5_core_dev, clock);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        u64 nsec_now, nsec_delta, time_stamp = 0;
        u64 cycles_now, cycles_delta;
        struct timespec64 ts;
        unsigned long flags;
        u32 field_select = 0;
        u8 pin_mode = 0;
        u8 pattern = 0;
        int pin = -1;
        int err = 0;
        s64 ns;

        if (!MLX5_PPS_CAP(mdev))
                return -EOPNOTSUPP;

        /* Reject requests with unsupported flags */
        if (rq->perout.flags)
                return -EOPNOTSUPP;

        if (rq->perout.index >= clock->ptp_info.n_pins)
                return -EINVAL;

        if (on) {
                pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
                                   rq->perout.index);
                if (pin < 0)
                        return -EBUSY;

                pin_mode = MLX5_PIN_MODE_OUT;
                pattern = MLX5_OUT_PATTERN_PERIODIC;
                ts.tv_sec = rq->perout.period.sec;
                ts.tv_nsec = rq->perout.period.nsec;
                ns = timespec64_to_ns(&ts);

                /* The HW can only generate a 1 Hz periodic output; the
                 * shift makes the check tolerant of one odd nanosecond.
                 */
                if ((ns >> 1) != 500000000LL)
                        return -EINVAL;

                ts.tv_sec = rq->perout.start.sec;
                ts.tv_nsec = rq->perout.start.nsec;
                ns = timespec64_to_ns(&ts);
                cycles_now = mlx5_read_internal_timer(mdev, NULL);
                write_seqlock_irqsave(&clock->lock, flags);
                nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
                nsec_delta = ns - nsec_now;
                cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
                                         clock->cycles.mult);
                write_sequnlock_irqrestore(&clock->lock, flags);
                time_stamp = cycles_now + cycles_delta;
                field_select = MLX5_MTPPS_FS_PIN_MODE |
                               MLX5_MTPPS_FS_PATTERN |
                               MLX5_MTPPS_FS_ENABLE |
                               MLX5_MTPPS_FS_TIME_STAMP;
        } else {
                pin = rq->perout.index;
                field_select = MLX5_MTPPS_FS_ENABLE;
        }

        MLX5_SET(mtpps_reg, in, pin, pin);
        MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
        MLX5_SET(mtpps_reg, in, pattern, pattern);
        MLX5_SET(mtpps_reg, in, enable, on);
        MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
        MLX5_SET(mtpps_reg, in, field_select, field_select);

        err = mlx5_set_mtpps(mdev, in, sizeof(in));
        if (err)
                return err;

        return mlx5_set_mtppse(mdev, pin, 0,
                               MLX5_EVENT_MODE_REPETETIVE & on);
}
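
/*
 * Illustration of the start-time math above (same assumed 156.25 MHz
 * counter as earlier notes): for a start 2 s ahead of the current counter
 * reading, cycles_delta = (2e9 << 23) / 53687091 ~= 312.5e6 cycles, i.e.
 * exactly 2 s of ticks, so time_stamp is the requested absolute start
 * expressed in free-running counter units.
 */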

static int mlx5_pps_configure(struct ptp_clock_info *ptp,
                              struct ptp_clock_request *rq,
                              int on)
{
        struct mlx5_clock *clock =
                        container_of(ptp, struct mlx5_clock, ptp_info);

        clock->pps_info.enabled = !!on;
        return 0;
}

static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
                           struct ptp_clock_request *rq,
                           int on)
{
        switch (rq->type) {
        case PTP_CLK_REQ_EXTTS:
                return mlx5_extts_configure(ptp, rq, on);
        case PTP_CLK_REQ_PEROUT:
                return mlx5_perout_configure(ptp, rq, on);
        case PTP_CLK_REQ_PPS:
                return mlx5_pps_configure(ptp, rq, on);
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
                           enum ptp_pin_function func, unsigned int chan)
{
        return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
}

static const struct ptp_clock_info mlx5_ptp_clock_info = {
        .owner          = THIS_MODULE,
        .name           = "mlx5_p2p",
        .max_adj        = 100000000,
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 0,
        .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = mlx5_ptp_adjfreq,
        .adjtime        = mlx5_ptp_adjtime,
        .gettimex64     = mlx5_ptp_gettimex,
        .settime64      = mlx5_ptp_settime,
        .enable         = NULL,
        .verify         = NULL,
};
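
/*
 * The zeroed n_pins/n_ext_ts/n_per_out/pps fields and NULL enable/verify
 * callbacks above are the no-1PPS defaults; mlx5_get_pps_caps() and
 * mlx5_init_pin_config() below overwrite them when the device reports PPS
 * capabilities.
 */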

static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
        int i;

        clock->ptp_info.pin_config =
                        kcalloc(clock->ptp_info.n_pins,
                                sizeof(*clock->ptp_info.pin_config),
                                GFP_KERNEL);
        if (!clock->ptp_info.pin_config)
                return -ENOMEM;
        clock->ptp_info.enable = mlx5_ptp_enable;
        clock->ptp_info.verify = mlx5_ptp_verify;
        clock->ptp_info.pps = 1;

        for (i = 0; i < clock->ptp_info.n_pins; i++) {
                snprintf(clock->ptp_info.pin_config[i].name,
                         sizeof(clock->ptp_info.pin_config[i].name),
                         "mlx5_pps%d", i);
                clock->ptp_info.pin_config[i].index = i;
                clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
                clock->ptp_info.pin_config[i].chan = i;
        }

        return 0;
}

static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
        struct mlx5_clock *clock = &mdev->clock;
        u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

        mlx5_query_mtpps(mdev, out, sizeof(out));

        clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
                                          cap_number_of_pps_pins);
        clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
                                            cap_max_num_of_pps_in_pins);
        clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
                                             cap_max_num_of_pps_out_pins);

        clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
        clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
        clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
        clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
        clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
        clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
        clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
        clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}

static int mlx5_pps_event(struct notifier_block *nb,
                          unsigned long type, void *data)
{
        struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
        struct mlx5_core_dev *mdev = clock->mdev;
        struct ptp_clock_event ptp_event;
        u64 cycles_now, cycles_delta;
        u64 nsec_now, nsec_delta, ns;
        struct mlx5_eqe *eqe = data;
        int pin = eqe->data.pps.pin;
        struct timespec64 ts;
        unsigned long flags;

        switch (clock->ptp_info.pin_config[pin].func) {
        case PTP_PF_EXTTS:
                ptp_event.index = pin;
                ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
                                be64_to_cpu(eqe->data.pps.time_stamp));
                if (clock->pps_info.enabled) {
                        ptp_event.type = PTP_CLOCK_PPSUSR;
                        ptp_event.pps_times.ts_real =
                                ns_to_timespec64(ptp_event.timestamp);
                } else {
                        ptp_event.type = PTP_CLOCK_EXTTS;
                }

                ptp_clock_event(clock->ptp, &ptp_event);
                break;
        case PTP_PF_PEROUT:
                /* Re-arm the output: compute the counter value matching the
                 * start of the next second and let mlx5_pps_out() program it.
                 */
                mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
                cycles_now = mlx5_read_internal_timer(mdev, NULL);
                ts.tv_sec += 1;
                ts.tv_nsec = 0;
                ns = timespec64_to_ns(&ts);
                write_seqlock_irqsave(&clock->lock, flags);
                nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
                nsec_delta = ns - nsec_now;
                cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
                                         clock->cycles.mult);
                clock->pps_info.start[pin] = cycles_now + cycles_delta;
                schedule_work(&clock->pps_info.out_work);
                write_sequnlock_irqrestore(&clock->lock, flags);
                break;
        default:
                mlx5_core_err(mdev, "Unhandled clock PPS event, func %d\n",
                              clock->ptp_info.pin_config[pin].func);
        }

        return NOTIFY_OK;
}

void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
        struct mlx5_clock *clock = &mdev->clock;
        u64 overflow_cycles;
        u64 ns;
        u64 frac = 0;
        u32 dev_freq;

        dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
        if (!dev_freq) {
                mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
                return;
        }
        seqlock_init(&clock->lock);
        clock->cycles.read = read_internal_timer;
        clock->cycles.shift = MLX5_CYCLES_SHIFT;
        clock->cycles.mult = clocksource_khz2mult(dev_freq,
                                                  clock->cycles.shift);
        clock->nominal_c_mult = clock->cycles.mult;
        clock->cycles.mask = CLOCKSOURCE_MASK(41);
        clock->mdev = mdev;

        timecounter_init(&clock->tc, &clock->cycles,
                         ktime_to_ns(ktime_get_real()));

        /* Schedule the overflow watchdog often enough that the counter is
         * read at least twice per wrap around.  The cycle budget is the
         * minimum of the max HW cycles count (the clock source mask) and
         * the max number of cycles that can be multiplied by the clock
         * multiplier without overflowing a 63-bit result.
         */
        overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
        overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));

        ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
                                 frac, &frac);
        do_div(ns, NSEC_PER_SEC / HZ);
        clock->overflow_period = ns;
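
        /*
         * Illustrative magnitude (assumed 156.25 MHz counter with
         * mult ~= 53687091): overflow_cycles = min(2^63 / mult, mask / 3)
         * ~= 1.7e11 cycles ~= 1100 s, so the overflow work runs roughly
         * every 18 minutes, far more often than the ~3.9 hours a 41-bit
         * counter takes to wrap at that rate.
         */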

        mdev->clock_info =
                (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
        if (mdev->clock_info) {
                mdev->clock_info->nsec = clock->tc.nsec;
                mdev->clock_info->cycles = clock->tc.cycle_last;
                mdev->clock_info->mask = clock->cycles.mask;
                mdev->clock_info->mult = clock->nominal_c_mult;
                mdev->clock_info->shift = clock->cycles.shift;
                mdev->clock_info->frac = clock->tc.frac;
                mdev->clock_info->overflow_period = clock->overflow_period;
        }

        INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
        INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
        if (clock->overflow_period)
                schedule_delayed_work(&clock->overflow_work, 0);
        else
                mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");

        /* Configure the PHC */
        clock->ptp_info = mlx5_ptp_clock_info;

        /* Initialize 1PPS data structures */
        if (MLX5_PPS_CAP(mdev))
                mlx5_get_pps_caps(mdev);
        if (clock->ptp_info.n_pins)
                mlx5_init_pin_config(clock);

        clock->ptp = ptp_clock_register(&clock->ptp_info,
                                        &mdev->pdev->dev);
        if (IS_ERR(clock->ptp)) {
                mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
                               PTR_ERR(clock->ptp));
                clock->ptp = NULL;
        }

        MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
        mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}

void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
        struct mlx5_clock *clock = &mdev->clock;

        if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
                return;

        mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
        if (clock->ptp) {
                ptp_clock_unregister(clock->ptp);
                clock->ptp = NULL;
        }

        cancel_work_sync(&clock->pps_info.out_work);
        cancel_delayed_work_sync(&clock->overflow_work);

        if (mdev->clock_info) {
                free_page((unsigned long)mdev->clock_info);
                mdev->clock_info = NULL;
        }

        kfree(clock->ptp_info.pin_config);
}
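
/*
 * Usage sketch (illustrative; the real helpers live in clock.h and the
 * datapath, and "cqe_ts" is a hypothetical raw timestamp variable):
 * consumers expand a free-running HW timestamp under the same seqlock the
 * writers above take:
 *
 *      unsigned int seq;
 *      u64 ns;
 *
 *      do {
 *              seq = read_seqbegin(&clock->lock);
 *              ns = timecounter_cyc2time(&clock->tc, cqe_ts);
 *      } while (read_seqretry(&clock->lock, seq));
 */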