root/drivers/misc/habanalabs/hw_queue.c

DEFINITIONS

This source file includes the following definitions.
  1. hl_hw_queue_add_ptr
  2. queue_free_slots
  3. hl_int_hw_queue_update_ci
  4. ext_queue_submit_bd
  5. ext_queue_sanity_checks
  6. int_queue_sanity_checks
  7. hl_hw_queue_send_cb_no_cmpl
  8. ext_hw_queue_schedule_job
  9. int_hw_queue_schedule_job
  10. hl_hw_queue_schedule_cs
  11. hl_hw_queue_inc_ci_kernel
  12. ext_and_cpu_hw_queue_init
  13. int_hw_queue_init
  14. cpu_hw_queue_init
  15. ext_hw_queue_init
  16. hw_queue_init
  17. hw_queue_fini
  18. hl_hw_queues_create
  19. hl_hw_queues_destroy
  20. hl_hw_queue_reset

// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/*
 * hl_hw_queue_add_ptr - add to pi or ci and check if it wraps around
 *
 * @ptr: the current pi/ci value
 * @val: the amount to add
 *
 * Add val to ptr. The result wraps at twice the queue length, so that a
 * full queue can be distinguished from an empty one.
 */
inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val)
{
        ptr += val;
        ptr &= ((HL_QUEUE_LENGTH << 1) - 1);
        return ptr;
}
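
/*
 * Worked example (illustrative numbers, not taken from the ASIC
 * configuration): if HL_QUEUE_LENGTH were 256, pi/ci would wrap at 512,
 * so hl_hw_queue_add_ptr(510, 4) = (510 + 4) & 511 = 2. Letting the
 * pointers range over [0, 2 * queue length) keeps a full queue
 * (occupancy == queue length) distinguishable from an empty one
 * (occupancy == 0), which queue_free_slots() below relies on.
 */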

static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
{
        int delta = (q->pi - q->ci);

        if (delta >= 0)
                return (queue_len - delta);
        else
                return (abs(delta) - queue_len);
}
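
/*
 * Worked example for queue_free_slots() (illustrative values, assuming
 * a queue length of 8 so that pi/ci wrap at 16): with pi = 5, ci = 2
 * the positive branch returns 8 - 3 = 5 free slots; with pi = 3,
 * ci = 14 (pi has wrapped past 15, ci has not) the occupancy is
 * (3 - 14) mod 16 = 5, and the negative branch returns
 * abs(3 - 14) - 8 = 3, i.e. the same 8 - 5 free slots.
 */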

void hl_int_hw_queue_update_ci(struct hl_cs *cs)
{
        struct hl_device *hdev = cs->ctx->hdev;
        struct hl_hw_queue *q;
        int i;

        hdev->asic_funcs->hw_queues_lock(hdev);

        if (hdev->disabled)
                goto out;

        q = &hdev->kernel_queues[0];
        for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
                if (q->queue_type == QUEUE_TYPE_INT) {
                        q->ci += cs->jobs_in_queue_cnt[i];
                        q->ci &= ((q->int_queue_len << 1) - 1);
                }
        }

out:
        hdev->asic_funcs->hw_queues_unlock(hdev);
}

/*
 * ext_queue_submit_bd - Submit a buffer descriptor to an external queue
 *
 * @hdev: pointer to habanalabs device structure
 * @q: pointer to habanalabs queue structure
 * @ctl: BD's control word
 * @len: BD's length
 * @ptr: BD's pointer
 *
 * This function assumes there is enough space on the queue to submit a new
 * BD to it. It initializes the next BD and calls the device specific
 * function to set the pi (and doorbell)
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
                                u32 ctl, u32 len, u64 ptr)
{
        struct hl_bd *bd;

        bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
        bd += hl_pi_2_offset(q->pi);
        bd->ctl = cpu_to_le32(ctl);
        bd->len = cpu_to_le32(len);
        bd->ptr = cpu_to_le64(ptr);

        q->pi = hl_queue_inc_ptr(q->pi);
        hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
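
/*
 * Minimal usage sketch (mirroring hl_hw_queue_send_cb_no_cmpl() below;
 * the caller holds the H/W queues lock and has already run the sanity
 * checks): a CB is submitted by pointing a fresh BD at its DMA address,
 *
 *     ext_queue_submit_bd(hdev, q, 0, cb_size, cb->bus_address);
 *
 * where ctl is 0 because no completion, and hence no shadow index in
 * the BD control word, is wanted for that submission.
 */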

/*
 * ext_queue_sanity_checks - perform some sanity checks on external queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 * @num_of_entries: how many entries to check for space
 * @reserve_cq_entry: whether to reserve an entry in the cq
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 * - Make sure we have enough space in the completion queue
 * - Reserve space in the completion queue (needs to be reversed if there
 *   is a failure down the road before the actual submission of work). Only
 *   do this action if reserve_cq_entry is true
 *
 */
static int ext_queue_sanity_checks(struct hl_device *hdev,
                                struct hl_hw_queue *q, int num_of_entries,
                                bool reserve_cq_entry)
{
        atomic_t *free_slots =
                        &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
        int free_slots_cnt;

        /* Check we have enough space in the queue */
        free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);

        if (free_slots_cnt < num_of_entries) {
                dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
                        q->hw_queue_id, num_of_entries);
                return -EAGAIN;
        }

        if (reserve_cq_entry) {
                /*
                 * Check we have enough space in the completion queue.
                 * Subtract num_of_entries from the free-slots counter;
                 * atomic_add_negative() returns true if the result went
                 * negative, meaning the CQ doesn't have room for all the
                 * new CBs. In that case, undo the subtraction and bail
                 * out - we must not submit a CB whose completion we
                 * won't get an ack for.
                 */
                if (atomic_add_negative(num_of_entries * -1, free_slots)) {
                        dev_dbg(hdev->dev, "No space for %d on CQ %d\n",
                                num_of_entries, q->hw_queue_id);
                        atomic_add(num_of_entries, free_slots);
                        return -EAGAIN;
                }
        }

        return 0;
}
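
/*
 * Note on the reservation above: a successful ext_queue_sanity_checks()
 * call with reserve_cq_entry == true consumes CQ slots that are only
 * given back on failure. hl_hw_queue_schedule_cs() below shows the
 * pairing: if a later queue fails its checks, the unroll_cq_resv path
 * re-adds jobs_in_queue_cnt[i] to each reserved CQ's free_slots_cnt.
 */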

/*
 * int_queue_sanity_checks - perform some sanity checks on internal queue
 *
 * @hdev: pointer to hl_device structure
 * @q: pointer to hl_hw_queue structure
 * @num_of_entries: how many entries to check for space
 *
 * H/W queues spinlock should be taken before calling this function
 *
 * Perform the following:
 * - Make sure we have enough space in the h/w queue
 *
 */
static int int_queue_sanity_checks(struct hl_device *hdev,
                                        struct hl_hw_queue *q,
                                        int num_of_entries)
{
        int free_slots_cnt;

        /* Check we have enough space in the queue */
        free_slots_cnt = queue_free_slots(q, q->int_queue_len);

        if (free_slots_cnt < num_of_entries) {
                dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n",
                        q->hw_queue_id, num_of_entries);
                return -EAGAIN;
        }

        return 0;
}

/*
 * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: ID of the queue to send the CB to
 * @cb_size: size of CB
 * @cb_ptr: pointer to CB location
 *
 * This function sends a single CB, that must NOT generate a completion entry
 *
 */
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
                                u32 cb_size, u64 cb_ptr)
{
        struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
        int rc;

        /*
         * The CPU queue is a synchronous queue with an effective depth of
         * a single entry (although it is allocated with room for multiple
         * entries). Therefore, there is a different lock, called
         * send_cpu_message_lock, that serializes accesses to the CPU queue.
         * As a result, we don't need to lock the access to the entire H/W
         * queues module when submitting a JOB to the CPU queue
         */
        if (q->queue_type != QUEUE_TYPE_CPU)
                hdev->asic_funcs->hw_queues_lock(hdev);

        if (hdev->disabled) {
                rc = -EPERM;
                goto out;
        }

        rc = ext_queue_sanity_checks(hdev, q, 1, false);
        if (rc)
                goto out;

        ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);

out:
        if (q->queue_type != QUEUE_TYPE_CPU)
                hdev->asic_funcs->hw_queues_unlock(hdev);

        return rc;
}
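
/*
 * Hypothetical usage sketch (the queue ID and CB fields are illustrative,
 * not real ASIC constants): an ASIC-specific driver that must push a
 * command buffer through a queue without a completion entry would call
 *
 *     rc = hl_hw_queue_send_cb_no_cmpl(hdev, SOME_QUEUE_ID,
 *                                      cb->size, cb->bus_address);
 *
 * and then wait for the result by other means, since no CQ entry will be
 * written for this CB.
 */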

/*
 * ext_hw_queue_schedule_job - submit a JOB to an external queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void ext_hw_queue_schedule_job(struct hl_cs_job *job)
{
        struct hl_device *hdev = job->cs->ctx->hdev;
        struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
        struct hl_cq_entry cq_pkt;
        struct hl_cq *cq;
        u64 cq_addr;
        struct hl_cb *cb;
        u32 ctl;
        u32 len;
        u64 ptr;

        /*
         * Update the JOB ID inside the BD CTL so the device would know what
         * to write in the completion queue
         */
        ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);

        cb = job->patched_cb;
        len = job->job_cb_size;
        ptr = cb->bus_address;

        cq_pkt.data = cpu_to_le32(
                                ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
                                        & CQ_ENTRY_SHADOW_INDEX_MASK) |
                                (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) |
                                (1 << CQ_ENTRY_READY_SHIFT));

        /*
         * No need to protect pi_offset because scheduling to the
         * H/W queues is done under the scheduler mutex
         *
         * No need to check if CQ is full because it was already
         * checked in ext_queue_sanity_checks
         */
        cq = &hdev->completion_queue[q->hw_queue_id];
        cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);

        hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
                                                cq_addr,
                                                le32_to_cpu(cq_pkt.data),
                                                q->hw_queue_id);

        q->shadow_queue[hl_pi_2_offset(q->pi)] = job;

        cq->pi = hl_cq_inc_ptr(cq->pi);

        ext_queue_submit_bd(hdev, q, ctl, len, ptr);
}
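
/*
 * The flow above ties three structures together: the BD ctl word and the
 * pre-built CQ packet both carry the current pi as a shadow index, and
 * q->shadow_queue[pi offset] stores the job pointer. When the device
 * finishes the work it writes the CQ entry, so the completion path
 * (handled outside this file) can use the shadow index it contains to
 * find this job again.
 */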

/*
 * int_hw_queue_schedule_job - submit a JOB to an internal queue
 *
 * @job: pointer to the job that needs to be submitted to the queue
 *
 * This function must be called when the scheduler mutex is taken
 *
 */
static void int_hw_queue_schedule_job(struct hl_cs_job *job)
{
        struct hl_device *hdev = job->cs->ctx->hdev;
        struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
        struct hl_bd bd;
        __le64 *pi;

        bd.ctl = 0;
        bd.len = cpu_to_le32(job->job_cb_size);
        bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);

        pi = (__le64 *) (uintptr_t) (q->kernel_address +
                ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));

        q->pi++;
        q->pi &= ((q->int_queue_len << 1) - 1);

        hdev->asic_funcs->pqe_write(hdev, pi, &bd);

        hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
}
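
/*
 * Contrast with ext_hw_queue_schedule_job() above: for internal queues
 * the driver writes the BD directly into the on-device queue via
 * pqe_write() and rings the doorbell, with no completion-queue entry and
 * no shadow-queue bookkeeping. Completion of internal jobs is tracked at
 * the CS level instead (see hl_int_hw_queue_update_ci()).
 */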

/*
 * hl_hw_queue_schedule_cs - schedule a command submission
 *
 * @cs: pointer to the CS
 *
 */
int hl_hw_queue_schedule_cs(struct hl_cs *cs)
{
        struct hl_device *hdev = cs->ctx->hdev;
        struct hl_cs_job *job, *tmp;
        struct hl_hw_queue *q;
        int rc = 0, i, cq_cnt;

        hdev->asic_funcs->hw_queues_lock(hdev);

        if (hl_device_disabled_or_in_reset(hdev)) {
                dev_err(hdev->dev,
                        "device is disabled or in reset, CS rejected!\n");
                rc = -EPERM;
                goto out;
        }

        q = &hdev->kernel_queues[0];
        /* This loop assumes all external queues are consecutive */
        for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
                if (q->queue_type == QUEUE_TYPE_EXT) {
                        if (cs->jobs_in_queue_cnt[i]) {
                                rc = ext_queue_sanity_checks(hdev, q,
                                        cs->jobs_in_queue_cnt[i], true);
                                if (rc)
                                        goto unroll_cq_resv;
                                cq_cnt++;
                        }
                } else if (q->queue_type == QUEUE_TYPE_INT) {
                        if (cs->jobs_in_queue_cnt[i]) {
                                rc = int_queue_sanity_checks(hdev, q,
                                        cs->jobs_in_queue_cnt[i]);
                                if (rc)
                                        goto unroll_cq_resv;
                        }
                }
        }

        spin_lock(&hdev->hw_queues_mirror_lock);
        list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list);

        /* Queue TDR if the CS is the first entry and if timeout is wanted */
        if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) &&
                        (list_first_entry(&hdev->hw_queues_mirror_list,
                                        struct hl_cs, mirror_node) == cs)) {
                cs->tdr_active = true;
                schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies);
                spin_unlock(&hdev->hw_queues_mirror_lock);
        } else {
                spin_unlock(&hdev->hw_queues_mirror_lock);
        }

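        /*
         * The first CS to go in-flight flips the device from idle to
         * busy, so record the transition timestamp for utilization
         * accounting (busy_to_idle_ts stays zero until the device goes
         * idle again).
         */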
        if (!hdev->cs_active_cnt++) {
                struct hl_device_idle_busy_ts *ts;

                ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx];
                ts->busy_to_idle_ts = ktime_set(0, 0);
                ts->idle_to_busy_ts = ktime_get();
        }

        list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                if (job->ext_queue)
                        ext_hw_queue_schedule_job(job);
                else
                        int_hw_queue_schedule_job(job);

        cs->submitted = true;

        goto out;

unroll_cq_resv:
        /* This loop assumes all external queues are consecutive */
        q = &hdev->kernel_queues[0];
        for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
                if ((q->queue_type == QUEUE_TYPE_EXT) &&
                                (cs->jobs_in_queue_cnt[i])) {
                        atomic_t *free_slots =
                                &hdev->completion_queue[i].free_slots_cnt;
                        atomic_add(cs->jobs_in_queue_cnt[i], free_slots);
                        cq_cnt--;
                }
        }

out:
        hdev->asic_funcs->hw_queues_unlock(hdev);

        return rc;
}

/*
 * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue
 *
 * @hdev: pointer to hl_device structure
 * @hw_queue_id: ID of the queue whose ci should be incremented
 */
void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id)
{
        struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];

        q->ci = hl_queue_inc_ptr(q->ci);
}

static int ext_and_cpu_hw_queue_init(struct hl_device *hdev,
                                struct hl_hw_queue *q, bool is_cpu_queue)
{
        void *p;
        int rc;

        if (is_cpu_queue)
                p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
                                                        HL_QUEUE_SIZE_IN_BYTES,
                                                        &q->bus_address);
        else
                p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
                                                HL_QUEUE_SIZE_IN_BYTES,
                                                &q->bus_address,
                                                GFP_KERNEL | __GFP_ZERO);
        if (!p)
                return -ENOMEM;

        q->kernel_address = (u64) (uintptr_t) p;

        q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
                                        sizeof(*q->shadow_queue),
                                        GFP_KERNEL);
        if (!q->shadow_queue) {
                dev_err(hdev->dev,
                        "Failed to allocate shadow queue for H/W queue %d\n",
                        q->hw_queue_id);
                rc = -ENOMEM;
                goto free_queue;
        }

        /* Make sure read/write pointers are initialized to start of queue */
        q->ci = 0;
        q->pi = 0;

        return 0;

free_queue:
        if (is_cpu_queue)
                hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
                                        HL_QUEUE_SIZE_IN_BYTES,
                                        (void *) (uintptr_t) q->kernel_address);
        else
                hdev->asic_funcs->asic_dma_free_coherent(hdev,
                                        HL_QUEUE_SIZE_IN_BYTES,
                                        (void *) (uintptr_t) q->kernel_address,
                                        q->bus_address);

        return rc;
}

static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
        void *p;

        p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
                                        &q->bus_address, &q->int_queue_len);
        if (!p) {
                dev_err(hdev->dev,
                        "Failed to get base address for internal queue %d\n",
                        q->hw_queue_id);
                return -EFAULT;
        }

        q->kernel_address = (u64) (uintptr_t) p;
        q->pi = 0;
        q->ci = 0;

        return 0;
}

static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
        return ext_and_cpu_hw_queue_init(hdev, q, true);
}

static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
        return ext_and_cpu_hw_queue_init(hdev, q, false);
}

/*
 * hw_queue_init - main initialization function for H/W queue object
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 * @hw_queue_id: The id of the H/W queue
 *
 * Allocate dma-able memory for the queue and initialize fields
 * Returns 0 on success
 */
static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
                        u32 hw_queue_id)
{
        int rc;

        BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE);

        q->hw_queue_id = hw_queue_id;

        switch (q->queue_type) {
        case QUEUE_TYPE_EXT:
                rc = ext_hw_queue_init(hdev, q);
                break;

        case QUEUE_TYPE_INT:
                rc = int_hw_queue_init(hdev, q);
                break;

        case QUEUE_TYPE_CPU:
                rc = cpu_hw_queue_init(hdev, q);
                break;

        case QUEUE_TYPE_NA:
                q->valid = 0;
                return 0;

        default:
                dev_crit(hdev->dev, "wrong queue type %d during init\n",
                        q->queue_type);
                rc = -EINVAL;
                break;
        }

        if (rc)
                return rc;

        q->valid = 1;

        return 0;
}

/*
 * hw_queue_fini - destroy queue
 *
 * @hdev: pointer to hl_device device structure
 * @q: pointer to hl_hw_queue queue structure
 *
 * Free the queue memory
 */
static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
{
        if (!q->valid)
                return;

        /*
         * If we arrived here, there are no jobs waiting on this queue
         * so we can safely remove it.
         * This is because this function can only be called when:
         * 1. Either a context is deleted, which can only occur if all its
         *    jobs were finished
         * 2. A context couldn't be created due to failure or timeout,
         *    which means there are no jobs on the queue yet
         *
         * The only exceptions are the queues of the kernel context, but
         * if they are being destroyed, it means that the entire module is
         * being removed. If the module is removed, it means there is no open
         * user context. It also means that if a job was submitted by
         * the kernel driver (e.g. context creation), the job itself was
         * released by the kernel driver when a timeout occurred on its
         * Completion. Thus, we don't need to release it again.
         */

        if (q->queue_type == QUEUE_TYPE_INT)
                return;

        kfree(q->shadow_queue);

        if (q->queue_type == QUEUE_TYPE_CPU)
                hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
                                        HL_QUEUE_SIZE_IN_BYTES,
                                        (void *) (uintptr_t) q->kernel_address);
        else
                hdev->asic_funcs->asic_dma_free_coherent(hdev,
                                        HL_QUEUE_SIZE_IN_BYTES,
                                        (void *) (uintptr_t) q->kernel_address,
                                        q->bus_address);
}

int hl_hw_queues_create(struct hl_device *hdev)
{
        struct asic_fixed_properties *asic = &hdev->asic_prop;
        struct hl_hw_queue *q;
        int i, rc, q_ready_cnt;

        hdev->kernel_queues = kcalloc(HL_MAX_QUEUES,
                                sizeof(*hdev->kernel_queues), GFP_KERNEL);

        if (!hdev->kernel_queues) {
                dev_err(hdev->dev, "Not enough memory for H/W queues\n");
                return -ENOMEM;
        }

        /* Initialize the H/W queues */
        for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
                        i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {

                q->queue_type = asic->hw_queues_props[i].type;
                rc = hw_queue_init(hdev, q, i);
                if (rc) {
                        dev_err(hdev->dev,
                                "failed to initialize queue %d\n", i);
                        goto release_queues;
                }
        }

        return 0;

release_queues:
        for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
                hw_queue_fini(hdev, q);

        kfree(hdev->kernel_queues);

        return rc;
}

void hl_hw_queues_destroy(struct hl_device *hdev)
{
        struct hl_hw_queue *q;
        int i;

        for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
                hw_queue_fini(hdev, q);

        kfree(hdev->kernel_queues);
}
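
/*
 * hl_hw_queue_reset - reset the pi/ci of all valid queues
 *
 * @hdev: pointer to hl_device structure
 * @hard_reset: whether this is a hard reset
 *
 * On a soft (non-hard) reset the CPU queue is left untouched, since only
 * a hard reset takes down the embedded CPU; on a hard reset every valid
 * queue is rewound to the start.
 */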
void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset)
{
        struct hl_hw_queue *q;
        int i;

        for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) {
                if ((!q->valid) ||
                        ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
                        continue;
                q->pi = q->ci = 0;
        }
}
