root/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c


DEFINITIONS

This source file includes the following definitions:
  1. read_internal_timer
  2. mlx5_update_clock_info_page
  3. mlx5_pps_out
  4. mlx5_timestamp_overflow
  5. mlx5_ptp_settime
  6. mlx5_ptp_gettimex
  7. mlx5_ptp_adjtime
  8. mlx5_ptp_adjfreq
  9. mlx5_extts_configure
  10. mlx5_perout_configure
  11. mlx5_pps_configure
  12. mlx5_ptp_enable
  13. mlx5_ptp_verify
  14. mlx5_init_pin_config
  15. mlx5_get_pps_caps
  16. mlx5_pps_event
  17. mlx5_init_clock
  18. mlx5_cleanup_clock

/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <rdma/mlx5-abi.h>
#include "lib/eq.h"
#include "en.h"
#include "clock.h"

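/* Fixed-point shift used for the cycle counter's mult/shift pair; it is the
 * shift passed to clocksource_khz2mult() in mlx5_init_clock().  In general a
 * larger shift gives a finer-grained multiplier but leaves less headroom
 * before a cycles * mult product overflows 64 bits.
 */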
enum {
        MLX5_CYCLES_SHIFT       = 23
};

enum {
        MLX5_PIN_MODE_IN                = 0x0,
        MLX5_PIN_MODE_OUT               = 0x1,
};

enum {
        MLX5_OUT_PATTERN_PULSE          = 0x0,
        MLX5_OUT_PATTERN_PERIODIC       = 0x1,
};

enum {
        MLX5_EVENT_MODE_DISABLE = 0x0,
        MLX5_EVENT_MODE_REPETETIVE      = 0x1,
        MLX5_EVENT_MODE_ONCE_TILL_ARM   = 0x2,
};

enum {
        MLX5_MTPPS_FS_ENABLE                    = BIT(0x0),
        MLX5_MTPPS_FS_PATTERN                   = BIT(0x2),
        MLX5_MTPPS_FS_PIN_MODE                  = BIT(0x3),
        MLX5_MTPPS_FS_TIME_STAMP                = BIT(0x4),
        MLX5_MTPPS_FS_OUT_PULSE_DURATION        = BIT(0x5),
        MLX5_MTPPS_FS_ENH_OUT_PER_ADJ           = BIT(0x7),
};

static u64 read_internal_timer(const struct cyclecounter *cc)
{
        struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                  clock);

        return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
}

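/* Publish the current timecounter state to the clock-info page shared with
 * mlx5_ib (struct mlx5_ib_clock_info from rdma/mlx5-abi.h).  The sign field
 * acts as a sequence counter: the "kernel updating" bit is set before the
 * fields are rewritten and the counter is advanced past it on completion, so
 * a reader of the page can detect a concurrent update and retry rather than
 * use torn values.
 */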
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
        struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
        struct mlx5_clock *clock = &mdev->clock;
        u32 sign;

        if (!clock_info)
                return;

        sign = smp_load_acquire(&clock_info->sign);
        smp_store_mb(clock_info->sign,
                     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

        clock_info->cycles = clock->tc.cycle_last;
        clock_info->mult   = clock->cycles.mult;
        clock_info->nsec   = clock->tc.nsec;
        clock_info->frac   = clock->tc.frac;

        smp_store_release(&clock_info->sign,
                          sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}

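/* Deferred work that programs the queued next-pulse times into the MTPPS
 * register.  mlx5_pps_event() stores the raw cycle time of the next output
 * edge in pps_info.start[pin]; this handler consumes each pending entry
 * (clearing it under the clock lock) and writes it to hardware.
 */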
static void mlx5_pps_out(struct work_struct *work)
{
        struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
                                                 out_work);
        struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
                                                pps_info);
        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                  clock);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        unsigned long flags;
        int i;

        for (i = 0; i < clock->ptp_info.n_pins; i++) {
                u64 tstart;

                write_seqlock_irqsave(&clock->lock, flags);
                tstart = clock->pps_info.start[i];
                clock->pps_info.start[i] = 0;
                write_sequnlock_irqrestore(&clock->lock, flags);
                if (!tstart)
                        continue;

                MLX5_SET(mtpps_reg, in, pin, i);
                MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
                MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
                mlx5_set_mtpps(mdev, in, sizeof(in));
        }
}

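/* Periodic watchdog that folds elapsed time into the timecounter often
 * enough that the free-running 41-bit hardware counter cannot wrap
 * unnoticed, then re-arms itself with the overflow period computed in
 * mlx5_init_clock().
 */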
static void mlx5_timestamp_overflow(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
                                                overflow_work);
        unsigned long flags;

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_read(&clock->tc);
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);
        schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}

static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
                            const struct timespec64 *ts)
{
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                 ptp_info);
        u64 ns = timespec64_to_ns(ts);
        unsigned long flags;

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_init(&clock->tc, &clock->cycles, ns);
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);

        return 0;
}

static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
                             struct ptp_system_timestamp *sts)
{
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                ptp_info);
        struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
                                                  clock);
        unsigned long flags;
        u64 cycles, ns;

        write_seqlock_irqsave(&clock->lock, flags);
        cycles = mlx5_read_internal_timer(mdev, sts);
        ns = timecounter_cyc2time(&clock->tc, cycles);
        write_sequnlock_irqrestore(&clock->lock, flags);

        *ts = ns_to_timespec64(ns);

        return 0;
}

static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                ptp_info);
        unsigned long flags;

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_adjtime(&clock->tc, delta);
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);

        return 0;
}

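/* Frequency adjustment: the PTP core passes @delta in parts per billion.
 * The multiplier offset is nominal_c_mult * |delta| / 10^9, added to or
 * subtracted from the nominal multiplier.  For example, a request of
 * +100 ppb raises the multiplier by nominal_c_mult * 100 / 10^9, i.e. by
 * one ten-millionth of its nominal value.
 */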
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
        u64 adj;
        u32 diff;
        unsigned long flags;
        int neg_adj = 0;
        struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
                                                ptp_info);

        if (delta < 0) {
                neg_adj = 1;
                delta = -delta;
        }

        adj = clock->nominal_c_mult;
        adj *= delta;
        diff = div_u64(adj, 1000000000ULL);

        write_seqlock_irqsave(&clock->lock, flags);
        timecounter_read(&clock->tc);
        clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
                                       clock->nominal_c_mult + diff;
        mlx5_update_clock_info_page(clock->mdev);
        write_sequnlock_irqrestore(&clock->lock, flags);

        return 0;
}

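/* Configure a pin as an external timestamp (EXTTS) input.  The MTPPS
 * pattern field is taken from PTP_FALLING_EDGE (1 for falling-edge
 * requests, 0 otherwise) and mlx5_set_mtppse() arms event generation for
 * the pin.
 */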
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
                                struct ptp_clock_request *rq,
                                int on)
{
        struct mlx5_clock *clock =
                        container_of(ptp, struct mlx5_clock, ptp_info);
        struct mlx5_core_dev *mdev =
                        container_of(clock, struct mlx5_core_dev, clock);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        u32 field_select = 0;
        u8 pin_mode = 0;
        u8 pattern = 0;
        int pin = -1;
        int err = 0;

        if (!MLX5_PPS_CAP(mdev))
                return -EOPNOTSUPP;

        /* Reject requests with unsupported flags */
        if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
                                PTP_RISING_EDGE |
                                PTP_FALLING_EDGE |
                                PTP_STRICT_FLAGS))
                return -EOPNOTSUPP;

        /* Reject requests to enable time stamping on both edges. */
        if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
            (rq->extts.flags & PTP_ENABLE_FEATURE) &&
            (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
                return -EOPNOTSUPP;

        if (rq->extts.index >= clock->ptp_info.n_pins)
                return -EINVAL;

        if (on) {
                pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
                if (pin < 0)
                        return -EBUSY;
                pin_mode = MLX5_PIN_MODE_IN;
                pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
                field_select = MLX5_MTPPS_FS_PIN_MODE |
                               MLX5_MTPPS_FS_PATTERN |
                               MLX5_MTPPS_FS_ENABLE;
        } else {
                pin = rq->extts.index;
                field_select = MLX5_MTPPS_FS_ENABLE;
        }

        MLX5_SET(mtpps_reg, in, pin, pin);
        MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
        MLX5_SET(mtpps_reg, in, pattern, pattern);
        MLX5_SET(mtpps_reg, in, enable, on);
        MLX5_SET(mtpps_reg, in, field_select, field_select);

        err = mlx5_set_mtpps(mdev, in, sizeof(in));
        if (err)
                return err;

        return mlx5_set_mtppse(mdev, pin, 0,
                               MLX5_EVENT_MODE_REPETETIVE & on);
}

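/* Configure a pin as a periodic output.  Only a 1 Hz signal is supported:
 * any requested period other than one second is rejected.  The requested
 * start time is converted from nanoseconds to raw counter cycles (the ns
 * delta scaled by shift/mult) before being written to MTPPS.
 */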
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                                 struct ptp_clock_request *rq,
                                 int on)
{
        struct mlx5_clock *clock =
                        container_of(ptp, struct mlx5_clock, ptp_info);
        struct mlx5_core_dev *mdev =
                        container_of(clock, struct mlx5_core_dev, clock);
        u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
        u64 nsec_now, nsec_delta, time_stamp = 0;
        u64 cycles_now, cycles_delta;
        struct timespec64 ts;
        unsigned long flags;
        u32 field_select = 0;
        u8 pin_mode = 0;
        u8 pattern = 0;
        int pin = -1;
        int err = 0;
        s64 ns;

        if (!MLX5_PPS_CAP(mdev))
                return -EOPNOTSUPP;

        /* Reject requests with unsupported flags */
        if (rq->perout.flags)
                return -EOPNOTSUPP;

        if (rq->perout.index >= clock->ptp_info.n_pins)
                return -EINVAL;

        if (on) {
                pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
                                   rq->perout.index);
                if (pin < 0)
                        return -EBUSY;

                pin_mode = MLX5_PIN_MODE_OUT;
                pattern = MLX5_OUT_PATTERN_PERIODIC;
                ts.tv_sec = rq->perout.period.sec;
                ts.tv_nsec = rq->perout.period.nsec;
                ns = timespec64_to_ns(&ts);

                if ((ns >> 1) != 500000000LL)
                        return -EINVAL;

                ts.tv_sec = rq->perout.start.sec;
                ts.tv_nsec = rq->perout.start.nsec;
                ns = timespec64_to_ns(&ts);
                cycles_now = mlx5_read_internal_timer(mdev, NULL);
                write_seqlock_irqsave(&clock->lock, flags);
                nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
                nsec_delta = ns - nsec_now;
                cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
                                         clock->cycles.mult);
                write_sequnlock_irqrestore(&clock->lock, flags);
                time_stamp = cycles_now + cycles_delta;
                field_select = MLX5_MTPPS_FS_PIN_MODE |
                               MLX5_MTPPS_FS_PATTERN |
                               MLX5_MTPPS_FS_ENABLE |
                               MLX5_MTPPS_FS_TIME_STAMP;
        } else {
                pin = rq->perout.index;
                field_select = MLX5_MTPPS_FS_ENABLE;
        }

        MLX5_SET(mtpps_reg, in, pin, pin);
        MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
        MLX5_SET(mtpps_reg, in, pattern, pattern);
        MLX5_SET(mtpps_reg, in, enable, on);
        MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
        MLX5_SET(mtpps_reg, in, field_select, field_select);

        err = mlx5_set_mtpps(mdev, in, sizeof(in));
        if (err)
                return err;

        return mlx5_set_mtppse(mdev, pin, 0,
                               MLX5_EVENT_MODE_REPETETIVE & on);
}

static int mlx5_pps_configure(struct ptp_clock_info *ptp,
                              struct ptp_clock_request *rq,
                              int on)
{
        struct mlx5_clock *clock =
                        container_of(ptp, struct mlx5_clock, ptp_info);

        clock->pps_info.enabled = !!on;
        return 0;
}

static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
                           struct ptp_clock_request *rq,
                           int on)
{
        switch (rq->type) {
        case PTP_CLK_REQ_EXTTS:
                return mlx5_extts_configure(ptp, rq, on);
        case PTP_CLK_REQ_PEROUT:
                return mlx5_perout_configure(ptp, rq, on);
        case PTP_CLK_REQ_PPS:
                return mlx5_pps_configure(ptp, rq, on);
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
                           enum ptp_pin_function func, unsigned int chan)
{
        return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
}

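/* Baseline PHC description, copied into clock->ptp_info at init time.  The
 * pin counts, the pps flag and the enable/verify callbacks stay zero/NULL
 * unless the device reports 1PPS capabilities, in which case they are
 * filled in by mlx5_get_pps_caps() and mlx5_init_pin_config().
 */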
static const struct ptp_clock_info mlx5_ptp_clock_info = {
        .owner          = THIS_MODULE,
        .name           = "mlx5_p2p",
        .max_adj        = 100000000,
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 0,
        .n_pins         = 0,
        .pps            = 0,
        .adjfreq        = mlx5_ptp_adjfreq,
        .adjtime        = mlx5_ptp_adjtime,
        .gettimex64     = mlx5_ptp_gettimex,
        .settime64      = mlx5_ptp_settime,
        .enable         = NULL,
        .verify         = NULL,
};

static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
        int i;

        clock->ptp_info.pin_config =
                        kcalloc(clock->ptp_info.n_pins,
                                sizeof(*clock->ptp_info.pin_config),
                                GFP_KERNEL);
        if (!clock->ptp_info.pin_config)
                return -ENOMEM;
        clock->ptp_info.enable = mlx5_ptp_enable;
        clock->ptp_info.verify = mlx5_ptp_verify;
        clock->ptp_info.pps = 1;

        for (i = 0; i < clock->ptp_info.n_pins; i++) {
                snprintf(clock->ptp_info.pin_config[i].name,
                         sizeof(clock->ptp_info.pin_config[i].name),
                         "mlx5_pps%d", i);
                clock->ptp_info.pin_config[i].index = i;
                clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
                clock->ptp_info.pin_config[i].chan = i;
        }

        return 0;
}

static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
        struct mlx5_clock *clock = &mdev->clock;
        u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

        mlx5_query_mtpps(mdev, out, sizeof(out));

        clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
                                          cap_number_of_pps_pins);
        clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
                                            cap_max_num_of_pps_in_pins);
        clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
                                             cap_max_num_of_pps_out_pins);

        clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
        clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
        clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
        clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
        clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
        clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
        clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
        clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}

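/* PPS event notifier.  For EXTTS pins the hardware timestamp carried in the
 * EQE is converted to nanoseconds and delivered either as a PPS or an EXTTS
 * event, depending on whether PTP_CLK_REQ_PPS is enabled.  For PEROUT pins
 * the next pulse is scheduled one second ahead and handed to out_work for
 * programming into the hardware.
 */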
static int mlx5_pps_event(struct notifier_block *nb,
                          unsigned long type, void *data)
{
        struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
        struct mlx5_core_dev *mdev = clock->mdev;
        struct ptp_clock_event ptp_event;
        u64 cycles_now, cycles_delta;
        u64 nsec_now, nsec_delta, ns;
        struct mlx5_eqe *eqe = data;
        int pin = eqe->data.pps.pin;
        struct timespec64 ts;
        unsigned long flags;

        switch (clock->ptp_info.pin_config[pin].func) {
        case PTP_PF_EXTTS:
                ptp_event.index = pin;
                ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
                                        be64_to_cpu(eqe->data.pps.time_stamp));
                if (clock->pps_info.enabled) {
                        ptp_event.type = PTP_CLOCK_PPSUSR;
                        ptp_event.pps_times.ts_real =
                                        ns_to_timespec64(ptp_event.timestamp);
                } else {
                        ptp_event.type = PTP_CLOCK_EXTTS;
                }
                /* TODO: clock->ptp can be NULL if ptp_clock_register fails */
                ptp_clock_event(clock->ptp, &ptp_event);
                break;
        case PTP_PF_PEROUT:
                mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
                cycles_now = mlx5_read_internal_timer(mdev, NULL);
                ts.tv_sec += 1;
                ts.tv_nsec = 0;
                ns = timespec64_to_ns(&ts);
                write_seqlock_irqsave(&clock->lock, flags);
                nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
                nsec_delta = ns - nsec_now;
                cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
                                         clock->cycles.mult);
                clock->pps_info.start[pin] = cycles_now + cycles_delta;
                schedule_work(&clock->pps_info.out_work);
                write_sequnlock_irqrestore(&clock->lock, flags);
                break;
        default:
                mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
                              clock->ptp_info.pin_config[pin].func);
        }

        return NOTIFY_OK;
}

void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
        struct mlx5_clock *clock = &mdev->clock;
        u64 overflow_cycles;
        u64 ns;
        u64 frac = 0;
        u32 dev_freq;

        dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
        if (!dev_freq) {
                mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
                return;
        }
        seqlock_init(&clock->lock);
        clock->cycles.read = read_internal_timer;
        clock->cycles.shift = MLX5_CYCLES_SHIFT;
        clock->cycles.mult = clocksource_khz2mult(dev_freq,
                                                  clock->cycles.shift);
        clock->nominal_c_mult = clock->cycles.mult;
        clock->cycles.mask = CLOCKSOURCE_MASK(41);
        clock->mdev = mdev;

        timecounter_init(&clock->tc, &clock->cycles,
                         ktime_to_ns(ktime_get_real()));

        /* Calculate how often to run the overflow watchdog so the counter is
         * read at least twice per wrap-around.  The cycle budget is the
         * minimum of the maximum HW cycle count (the clocksource mask) and
         * the largest cycle count that can be multiplied by the clock
         * multiplier without exceeding 64 bits; it is then converted to
         * nanoseconds and, below, to jiffies for schedule_delayed_work().
         */
        overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
        overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));

        ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
                                 frac, &frac);
        do_div(ns, NSEC_PER_SEC / HZ);
        clock->overflow_period = ns;

        mdev->clock_info =
                (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
        if (mdev->clock_info) {
                mdev->clock_info->nsec = clock->tc.nsec;
                mdev->clock_info->cycles = clock->tc.cycle_last;
                mdev->clock_info->mask = clock->cycles.mask;
                mdev->clock_info->mult = clock->nominal_c_mult;
                mdev->clock_info->shift = clock->cycles.shift;
                mdev->clock_info->frac = clock->tc.frac;
                mdev->clock_info->overflow_period = clock->overflow_period;
        }

        INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
        INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
        if (clock->overflow_period)
                schedule_delayed_work(&clock->overflow_work, 0);
        else
                mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");

        /* Configure the PHC */
        clock->ptp_info = mlx5_ptp_clock_info;

        /* Initialize 1PPS data structures */
        if (MLX5_PPS_CAP(mdev))
                mlx5_get_pps_caps(mdev);
        if (clock->ptp_info.n_pins)
                mlx5_init_pin_config(clock);

        clock->ptp = ptp_clock_register(&clock->ptp_info,
                                        &mdev->pdev->dev);
        if (IS_ERR(clock->ptp)) {
                mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
                               PTR_ERR(clock->ptp));
                clock->ptp = NULL;
        }

        MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
        mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}

void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
        struct mlx5_clock *clock = &mdev->clock;

        if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
                return;

        mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
        if (clock->ptp) {
                ptp_clock_unregister(clock->ptp);
                clock->ptp = NULL;
        }

        cancel_work_sync(&clock->pps_info.out_work);
        cancel_delayed_work_sync(&clock->overflow_work);

        if (mdev->clock_info) {
                free_page((unsigned long)mdev->clock_info);
                mdev->clock_info = NULL;
        }

        kfree(clock->ptp_info.pin_config);
}
