root/drivers/net/ethernet/cavium/liquidio/request_manager.c


DEFINITIONS

This source file includes the following definitions:
  1. IQ_INSTR_MODE_64B
  2. octeon_init_instr_queue
  3. octeon_delete_instr_queue
  4. octeon_setup_iq
  5. lio_wait_for_instr_fetch
  6. ring_doorbell
  7. octeon_ring_doorbell_locked
  8. __copy_cmd_into_iq
  9. __post_command2
  10. octeon_register_reqtype_free_fn
  11. __add_to_request_list
  12. lio_process_iq_request_list
  13. octeon_flush_iq
  14. __check_db_timeout
  15. check_db_timeout
  16. octeon_send_command
  17. octeon_prepare_soft_command
  18. octeon_send_soft_command
  19. octeon_setup_sc_buffer_pool
  20. octeon_free_sc_done_list
  21. octeon_free_sc_zombie_list
  22. octeon_free_sc_buffer_pool
  23. octeon_alloc_soft_command
  24. octeon_free_soft_command

/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

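/* Result of posting a single command: an IQ_SEND_* status code and the
 * ring index at which the command was written (-1 on failure).
 */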
struct iq_post_status {
        int status;
        int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

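/* Per-device table of "free buffer" callbacks, indexed by request type;
 * entries are registered via octeon_register_reqtype_free_fn().
 */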
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
        struct octeon_instr_queue *iq =
            (struct octeon_instr_queue *)oct->instr_queue[iq_no];
        return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return a request status compatible with the old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
                            union oct_txpciq txpciq,
                            u32 num_descs)
{
        struct octeon_instr_queue *iq;
        struct octeon_iq_config *conf = NULL;
        u32 iq_no = (u32)txpciq.s.q_no;
        u32 q_size;
        struct cavium_wq *db_wq;
        int numa_node = dev_to_node(&oct->pci_dev->dev);

        if (OCTEON_CN6XXX(oct))
                conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
        else if (OCTEON_CN23XX_PF(oct))
                conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
        else if (OCTEON_CN23XX_VF(oct))
                conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf)));

        if (!conf) {
                dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
                        oct->chip_id);
                return 1;
        }

        q_size = (u32)conf->instr_type * num_descs;

        iq = oct->instr_queue[iq_no];

        iq->oct_dev = oct;

        iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
        if (!iq->base_addr) {
                dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
                        iq_no);
                return 1;
        }

        iq->max_count = num_descs;

        /* Initialize a list to hold requests that have been posted to Octeon
         * but have yet to be fetched by Octeon.
         */
        iq->request_list = vmalloc_node(array_size(num_descs,
                                                   sizeof(*iq->request_list)),
                                        numa_node);
        if (!iq->request_list)
                iq->request_list =
                        vmalloc(array_size(num_descs,
                                           sizeof(*iq->request_list)));
        if (!iq->request_list) {
                lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
                dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
                        iq_no);
                return 1;
        }

        memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

        dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
                iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);

        iq->txpciq.u64 = txpciq.u64;
        iq->fill_threshold = (u32)conf->db_min;
        iq->fill_cnt = 0;
        iq->host_write_index = 0;
        iq->octeon_read_index = 0;
        iq->flush_index = 0;
        iq->last_db_time = 0;
        iq->do_auto_flush = 1;
        iq->db_timeout = (u32)conf->db_timeout;
        atomic_set(&iq->instr_pending, 0);
        iq->pkts_processed = 0;

        /* Initialize the spinlock for this instruction queue */
        spin_lock_init(&iq->lock);
        if (iq_no == 0) {
                iq->allow_soft_cmds = true;
                spin_lock_init(&iq->post_lock);
        } else {
                iq->allow_soft_cmds = false;
        }

        spin_lock_init(&iq->iq_flush_running_lock);

        oct->io_qmask.iq |= BIT_ULL(iq_no);

        /* Set the 32B/64B mode for each input queue */
        oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
        iq->iqcmd_64B = (conf->instr_type == 64);

        oct->fn_list.setup_iq_regs(oct, iq_no);

        oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
                                                     WQ_MEM_RECLAIM,
                                                     0);
        if (!oct->check_db_wq[iq_no].wq) {
                vfree(iq->request_list);
                iq->request_list = NULL;
                lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
                dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
                        iq_no);
                return 1;
        }

        db_wq = &oct->check_db_wq[iq_no];

        INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
        db_wq->wk.ctxptr = oct;
        db_wq->wk.ctxul = iq_no;
        queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

        return 0;
}

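/* Tear down an instruction queue set up by octeon_init_instr_queue():
 * stop the doorbell-check work, free the request list and the
 * descriptor ring.  Returns 0 on success, 1 if the ring was never
 * allocated.
 */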
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
        u64 desc_size = 0, q_size;
        struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

        cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
        destroy_workqueue(oct->check_db_wq[iq_no].wq);

        if (OCTEON_CN6XXX(oct))
                desc_size =
                    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx));
        else if (OCTEON_CN23XX_PF(oct))
                desc_size =
                    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));
        else if (OCTEON_CN23XX_VF(oct))
                desc_size =
                    CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf));

        vfree(iq->request_list);

        if (iq->base_addr) {
                q_size = iq->max_count * desc_size;
                lio_dma_free(oct, (u32)q_size, iq->base_addr,
                             iq->base_addr_dma);
                oct->io_qmask.iq &= ~(1ULL << iq_no);
                vfree(oct->instr_queue[iq_no]);
                oct->instr_queue[iq_no] = NULL;
                oct->num_iqs--;
                return 0;
        }
        return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
                    int ifidx,
                    int q_index,
                    union oct_txpciq txpciq,
                    u32 num_descs,
                    void *app_ctx)
{
        u32 iq_no = (u32)txpciq.s.q_no;
        int numa_node = dev_to_node(&oct->pci_dev->dev);

        if (oct->instr_queue[iq_no]) {
                dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
                        iq_no);
                oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
                oct->instr_queue[iq_no]->app_ctx = app_ctx;
                return 0;
        }
        oct->instr_queue[iq_no] =
            vzalloc_node(sizeof(struct octeon_instr_queue), numa_node);
        if (!oct->instr_queue[iq_no])
                oct->instr_queue[iq_no] =
                    vzalloc(sizeof(struct octeon_instr_queue));
        if (!oct->instr_queue[iq_no])
                return 1;

        oct->instr_queue[iq_no]->q_index = q_index;
        oct->instr_queue[iq_no]->app_ctx = app_ctx;
        oct->instr_queue[iq_no]->ifidx = ifidx;

        if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
                vfree(oct->instr_queue[iq_no]);
                oct->instr_queue[iq_no] = NULL;
                return 1;
        }

        oct->num_iqs++;
        if (oct->fn_list.enable_io_queues(oct)) {
                octeon_delete_instr_queue(oct, iq_no);
                return 1;
        }

        return 0;
}

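/* Poll, up to 1000 retries with a one-jiffy sleep between passes, for
 * the device to fetch all posted instructions on every active IQ.
 * Returns the number of instructions still pending when the wait gives
 * up, i.e. 0 on success.
 */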
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
        int i, retry = 1000, pending, instr_cnt = 0;

        do {
                instr_cnt = 0;

                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        pending =
                            atomic_read(&oct->instr_queue[i]->instr_pending);
                        if (pending)
                                __check_db_timeout(oct, i);
                        instr_cnt += pending;
                }

                if (instr_cnt == 0)
                        break;

                schedule_timeout_uninterruptible(1);

        } while (retry-- && instr_cnt);

        return instr_cnt;
}

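/* Write the accumulated fill count to the device doorbell register so
 * that Octeon starts fetching the newly posted commands.  Only rings
 * while the device is in the RUNNING state.
 */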
static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
        if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
                writel(iq->fill_cnt, iq->doorbell_reg);
                /* make sure doorbell write goes through */
                iq->fill_cnt = 0;
                iq->last_db_time = jiffies;
                return;
        }
}

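/* Same as ring_doorbell(), but takes iq->post_lock so it can be called
 * from outside the normal command-posting path.
 */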
void
octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no)
{
        struct octeon_instr_queue *iq;

        iq = oct->instr_queue[iq_no];
        spin_lock(&iq->post_lock);
        if (iq->fill_cnt)
                ring_doorbell(oct, iq);
        spin_unlock(&iq->post_lock);
}

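/* Copy a 32B or 64B command image into the ring slot currently pointed
 * at by host_write_index.
 */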
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
                                      u8 *cmd)
{
        u8 *iqptr, cmdsize;

        cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
        iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

        memcpy(iqptr, cmd, cmdsize);
}

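/* Copy one command into the ring and advance host_write_index.  One
 * descriptor is always kept unused so that a full queue can never make
 * the read index alias the write index.  Returns the status and slot
 * index used, and bumps instr_pending once the command is visible in
 * memory.
 */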
static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
        struct iq_post_status st;

        st.status = IQ_SEND_OK;

        /* This ensures that the read index does not wrap around to the same
         * position if queue gets full before Octeon could fetch any instr.
         */
        if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
                st.status = IQ_SEND_FAILED;
                st.index = -1;
                return st;
        }

        if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
                st.status = IQ_SEND_STOP;

        __copy_cmd_into_iq(iq, cmd);

        /* "index" is returned, host_write_index is modified. */
        st.index = iq->host_write_index;
        iq->host_write_index = incr_index(iq->host_write_index, 1,
                                          iq->max_count);
        iq->fill_cnt++;

        /* Flush the command into memory. We need to be sure the data is in
         * memory before indicating that the instruction is pending.
         */
        wmb();

        atomic_inc(&iq->instr_pending);

        return st;
}

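/* Register the callback used to free buffers posted with the given
 * request type once the device has fetched them.
 */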
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
                                void (*fn)(void *))
{
        if (reqtype > REQTYPE_LAST) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
                        __func__, reqtype);
                return -EINVAL;
        }

        reqtype_free_fn[oct->octeon_id][reqtype] = fn;

        return 0;
}

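/* Record the buffer and request type for the descriptor at @idx so the
 * buffer can be reclaimed once the device has fetched the command.
 */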
static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
                      int idx, void *buf, int reqtype)
{
        iq->request_list[idx].buf = buf;
        iq->request_list[idx].reqtype = reqtype;
}

/* Can only run in process context */
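/* Walk the request list from flush_index up to octeon_read_index and
 * reclaim every buffer the device has already fetched; soft commands
 * awaiting a response are moved to the ordered response list instead.
 * Returns the number of entries processed (bounded by napi_budget when
 * it is non-zero).
 */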
int
lio_process_iq_request_list(struct octeon_device *oct,
                            struct octeon_instr_queue *iq, u32 napi_budget)
{
        struct cavium_wq *cwq = &oct->dma_comp_wq;
        int reqtype;
        void *buf;
        u32 old = iq->flush_index;
        u32 inst_count = 0;
        unsigned int pkts_compl = 0, bytes_compl = 0;
        struct octeon_soft_command *sc;
        unsigned long flags;

        while (old != iq->octeon_read_index) {
                reqtype = iq->request_list[old].reqtype;
                buf     = iq->request_list[old].buf;

                if (reqtype == REQTYPE_NONE)
                        goto skip_this;

                octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
                                                     &bytes_compl);

                switch (reqtype) {
                case REQTYPE_NORESP_NET:
                case REQTYPE_NORESP_NET_SG:
                case REQTYPE_RESP_NET_SG:
                        reqtype_free_fn[oct->octeon_id][reqtype](buf);
                        break;
                case REQTYPE_RESP_NET:
                case REQTYPE_SOFT_COMMAND:
                        sc = buf;
                        /* We are expecting a response from Octeon, so
                         * add sc to the ordered soft-command response
                         * list; lio_process_ordered_list() will process
                         * it.
                         */
                        spin_lock_irqsave(&oct->response_list
                                          [OCTEON_ORDERED_SC_LIST].lock, flags);
                        atomic_inc(&oct->response_list
                                   [OCTEON_ORDERED_SC_LIST].pending_req_count);
                        list_add_tail(&sc->node, &oct->response_list
                                [OCTEON_ORDERED_SC_LIST].head);
                        spin_unlock_irqrestore(&oct->response_list
                                               [OCTEON_ORDERED_SC_LIST].lock,
                                               flags);
                        break;
                default:
                        dev_err(&oct->pci_dev->dev,
                                "%s Unknown reqtype: %d buf: %p at idx %d\n",
                                __func__, reqtype, buf, old);
                }

                iq->request_list[old].buf = NULL;
                iq->request_list[old].reqtype = 0;

 skip_this:
                inst_count++;
                old = incr_index(old, 1, iq->max_count);

                if ((napi_budget) && (inst_count >= napi_budget))
                        break;
        }
        if (bytes_compl)
                octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
                                                   bytes_compl);
        iq->flush_index = old;

        if (atomic_read(&oct->response_list
                        [OCTEON_ORDERED_SC_LIST].pending_req_count))
                queue_work(cwq->wq, &cwq->wk.work.work);

        return inst_count;
}

/* Can only be called from process context */
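/* Drain entries the device has fetched, under iq->lock.  Returns 0 if
 * napi_budget was exhausted before the queue was fully drained, 1
 * otherwise; bails out immediately if another flush is already running
 * on this queue.
 */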
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
                u32 napi_budget)
{
        u32 inst_processed = 0;
        u32 tot_inst_processed = 0;
        int tx_done = 1;

        if (!spin_trylock(&iq->iq_flush_running_lock))
                return tx_done;

        spin_lock_bh(&iq->lock);

        iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

        do {
                /* Process any outstanding IQ packets. */
                if (iq->flush_index == iq->octeon_read_index)
                        break;

                if (napi_budget)
                        inst_processed =
                                lio_process_iq_request_list(oct, iq,
                                                            napi_budget -
                                                            tot_inst_processed);
                else
                        inst_processed =
                                lio_process_iq_request_list(oct, iq, 0);

                if (inst_processed) {
                        iq->pkts_processed += inst_processed;
                        atomic_sub(inst_processed, &iq->instr_pending);
                        iq->stats.instr_processed += inst_processed;
                }

                tot_inst_processed += inst_processed;
        } while (tot_inst_processed < napi_budget);

        if (napi_budget && (tot_inst_processed >= napi_budget))
                tx_done = 0;

        iq->last_db_time = jiffies;

        spin_unlock_bh(&iq->lock);

        spin_unlock(&iq->iq_flush_running_lock);

        return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
        struct octeon_instr_queue *iq;
        u64 next_time;

        if (!oct)
                return;

        iq = oct->instr_queue[iq_no];
        if (!iq)
                return;

        /* return immediately, if no work pending */
        if (!atomic_read(&iq->instr_pending))
                return;
        /* If jiffies - last_db_time < db_timeout do nothing */
        next_time = iq->last_db_time + iq->db_timeout;
        if (!time_after(jiffies, (unsigned long)next_time))
                return;
        iq->last_db_time = jiffies;

        /* Flush the instruction queue */
        octeon_flush_iq(oct, iq, 0);

        lio_enable_irq(NULL, iq);
}

/* Delayed-work handler that runs at regular intervals to flush an
 * instruction queue whose doorbell has not been rung within the
 * timeout; it re-arms itself every 10 ms.
 */
static void check_db_timeout(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
        u64 iq_no = wk->ctxul;
        struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
        u32 delay = 10;

        __check_db_timeout(oct, iq_no);
        queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}

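/* Post a single command (and its associated buffer/reqtype) to the
 * given IQ.  The doorbell is rung when the fill count reaches
 * MAX_OCTEON_FILL_COUNT, when @force_db is set, when the netdev queue
 * was stopped, or when the queue is about to fill up.  Returns an
 * IQ_SEND_* status.
 */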
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
                    u32 force_db, void *cmd, void *buf,
                    u32 datasize, u32 reqtype)
{
        int xmit_stopped;
        struct iq_post_status st;
        struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

        /* Get the lock and prevent other tasks and tx interrupt handler from
         * running.
         */
        if (iq->allow_soft_cmds)
                spin_lock_bh(&iq->post_lock);

        st = __post_command2(iq, cmd);

        if (st.status != IQ_SEND_FAILED) {
                xmit_stopped = octeon_report_sent_bytes_to_bql(buf, reqtype);
                __add_to_request_list(iq, st.index, buf, reqtype);
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

                if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db ||
                    xmit_stopped || st.status == IQ_SEND_STOP)
                        ring_doorbell(oct, iq);
        } else {
                INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
        }

        if (iq->allow_soft_cmds)
                spin_unlock_bh(&iq->post_lock);

        return st.status;
}

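/* Fill in the command words of a soft command: the instruction header
 * (IH2 for CN6XXX, IH3/PKI-IH3 for CN23XX), the input request header
 * (opcode, subcode, ossp) and, if a response is expected, the response
 * descriptor pointer (RDP).
 */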
void
octeon_prepare_soft_command(struct octeon_device *oct,
                            struct octeon_soft_command *sc,
                            u8 opcode,
                            u8 subcode,
                            u32 irh_ossp,
                            u64 ossp0,
                            u64 ossp1)
{
        struct octeon_config *oct_cfg;
        struct octeon_instr_ih2 *ih2;
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_pki_ih3 *pki_ih3;
        struct octeon_instr_irh *irh;
        struct octeon_instr_rdp *rdp;

        WARN_ON(opcode > 15);
        WARN_ON(subcode > 127);

        oct_cfg = octeon_get_conf(oct);

        if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
                ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

                ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

                pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

                pki_ih3->w       = 1;
                pki_ih3->raw     = 1;
                pki_ih3->utag    = 1;
                pki_ih3->uqpg    =
                        oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
                pki_ih3->utt     = 1;
                pki_ih3->tag     = LIO_CONTROL;
                pki_ih3->tagtype = ATOMIC_TAG;
                pki_ih3->qpg     =
                        oct->instr_queue[sc->iq_no]->txpciq.s.ctrl_qpg;

                pki_ih3->pm      = 0x7;
                pki_ih3->sl      = 8;

                if (sc->datasize)
                        ih3->dlengsz = sc->datasize;

                irh            = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
                irh->opcode    = opcode;
                irh->subcode   = subcode;

                /* opcode/subcode specific parameters (ossp) */
                irh->ossp = irh_ossp;
                sc->cmd.cmd3.ossp[0] = ossp0;
                sc->cmd.cmd3.ossp[1] = ossp1;

                if (sc->rdatasize) {
                        rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
                        rdp->pcie_port = oct->pcie_port;
                        rdp->rlen      = sc->rdatasize;

                        irh->rflag = 1;
                        /* PKI IH3 */
                        /* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
                        ih3->fsz = LIO_SOFTCMDRESP_IH3;
                } else {
                        irh->rflag = 0;
                        /* PKI IH3 */
                        /* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
                        ih3->fsz = LIO_PCICMD_O3;
                }

        } else {
                ih2          = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
                ih2->tagtype = ATOMIC_TAG;
                ih2->tag     = LIO_CONTROL;
                ih2->raw     = 1;
                ih2->grp     = CFG_GET_CTRL_Q_GRP(oct_cfg);

                if (sc->datasize) {
                        ih2->dlengsz = sc->datasize;
                        ih2->rs = 1;
                }

                irh            = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
                irh->opcode    = opcode;
                irh->subcode   = subcode;

                /* opcode/subcode specific parameters (ossp) */
                irh->ossp = irh_ossp;
                sc->cmd.cmd2.ossp[0] = ossp0;
                sc->cmd.cmd2.ossp[1] = ossp1;

                if (sc->rdatasize) {
                        rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
                        rdp->pcie_port = oct->pcie_port;
                        rdp->rlen      = sc->rdatasize;

                        irh->rflag = 1;
                        /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
                        ih2->fsz = LIO_SOFTCMDRESP_IH2;
                } else {
                        irh->rflag = 0;
                        /* irh + ossp[0] + ossp[1] = 24 bytes */
                        ih2->fsz = LIO_PCICMD_O2;
                }
        }
}

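/* Submit a prepared soft command on its instruction queue: hook up the
 * DMA data/response pointers, initialize the completion status word,
 * set the expiry time and post the command with the doorbell forced.
 * Returns an IQ_SEND_* status.
 */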
int octeon_send_soft_command(struct octeon_device *oct,
                             struct octeon_soft_command *sc)
{
        struct octeon_instr_queue *iq;
        struct octeon_instr_ih2 *ih2;
        struct octeon_instr_ih3 *ih3;
        struct octeon_instr_irh *irh;
        u32 len;

        iq = oct->instr_queue[sc->iq_no];
        if (!iq->allow_soft_cmds) {
                dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
                        sc->iq_no);
                INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
                return IQ_SEND_FAILED;
        }

        if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
                ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
                if (ih3->dlengsz) {
                        WARN_ON(!sc->dmadptr);
                        sc->cmd.cmd3.dptr = sc->dmadptr;
                }
                irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
                if (irh->rflag) {
                        WARN_ON(!sc->dmarptr);
                        WARN_ON(!sc->status_word);
                        *sc->status_word = COMPLETION_WORD_INIT;
                        sc->cmd.cmd3.rptr = sc->dmarptr;
                }
                len = (u32)ih3->dlengsz;
        } else {
                ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
                if (ih2->dlengsz) {
                        WARN_ON(!sc->dmadptr);
                        sc->cmd.cmd2.dptr = sc->dmadptr;
                }
                irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
                if (irh->rflag) {
                        WARN_ON(!sc->dmarptr);
                        WARN_ON(!sc->status_word);
                        *sc->status_word = COMPLETION_WORD_INIT;
                        sc->cmd.cmd2.rptr = sc->dmarptr;
                }
                len = (u32)ih2->dlengsz;
        }

        sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);

        return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
                                    len, REQTYPE_SOFT_COMMAND));
}

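/* Pre-allocate MAX_SOFT_COMMAND_BUFFERS DMA-coherent soft-command
 * buffers and park them on sc_buf_pool for later allocation.
 */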
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
        int i;
        u64 dma_addr;
        struct octeon_soft_command *sc;

        INIT_LIST_HEAD(&oct->sc_buf_pool.head);
        spin_lock_init(&oct->sc_buf_pool.lock);
        atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

        for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
                sc = (struct octeon_soft_command *)
                        lio_dma_alloc(oct,
                                      SOFT_COMMAND_BUFFER_SIZE,
                                      (dma_addr_t *)&dma_addr);
                if (!sc) {
                        octeon_free_sc_buffer_pool(oct);
                        return 1;
                }

                sc->dma_addr = dma_addr;
                sc->size = SOFT_COMMAND_BUFFER_SIZE;

                list_add_tail(&sc->node, &oct->sc_buf_pool.head);
        }

        return 0;
}

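/* Reclaim completed soft commands whose callers are done with them;
 * commands that timed out (completion word still at its init value)
 * are moved to the zombie list instead of being freed.
 */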
int octeon_free_sc_done_list(struct octeon_device *oct)
{
        struct octeon_response_list *done_sc_list, *zombie_sc_list;
        struct octeon_soft_command *sc;
        struct list_head *tmp, *tmp2;
        spinlock_t *sc_lists_lock; /* lock for response_list */

        done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
        zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];

        if (!atomic_read(&done_sc_list->pending_req_count))
                return 0;

        sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;

        spin_lock_bh(sc_lists_lock);

        list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
                sc = list_entry(tmp, struct octeon_soft_command, node);

                if (READ_ONCE(sc->caller_is_done)) {
                        list_del(&sc->node);
                        atomic_dec(&done_sc_list->pending_req_count);

                        if (*sc->status_word == COMPLETION_WORD_INIT) {
                                /* timeout; move sc to zombie list */
                                list_add_tail(&sc->node, &zombie_sc_list->head);
                                atomic_inc(&zombie_sc_list->pending_req_count);
                        } else {
                                octeon_free_soft_command(oct, sc);
                        }
                }
        }

        spin_unlock_bh(sc_lists_lock);

        return 0;
}

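/* Free every soft command parked on the zombie list, i.e. commands
 * that timed out waiting for a response from the device.
 */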
int octeon_free_sc_zombie_list(struct octeon_device *oct)
{
        struct octeon_response_list *zombie_sc_list;
        struct octeon_soft_command *sc;
        struct list_head *tmp, *tmp2;
        spinlock_t *sc_lists_lock; /* lock for response_list */

        zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
        sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;

        spin_lock_bh(sc_lists_lock);

        list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
                list_del(tmp);
                atomic_dec(&zombie_sc_list->pending_req_count);
                sc = list_entry(tmp, struct octeon_soft_command, node);
                octeon_free_soft_command(oct, sc);
        }

        spin_unlock_bh(sc_lists_lock);

        return 0;
}

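/* Release the whole soft-command buffer pool back to the DMA
 * allocator; any remaining zombie commands are freed first.
 */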
int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
        struct list_head *tmp, *tmp2;
        struct octeon_soft_command *sc;

        octeon_free_sc_zombie_list(oct);

        spin_lock_bh(&oct->sc_buf_pool.lock);

        list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
                list_del(tmp);

                sc = list_entry(tmp, struct octeon_soft_command, node);

                lio_dma_free(oct, sc->size, sc, sc->dma_addr);
        }

        INIT_LIST_HEAD(&oct->sc_buf_pool.head);

        spin_unlock_bh(&oct->sc_buf_pool.lock);

        return 0;
}

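/* Carve a soft command out of a pool buffer.  Layout within the
 * buffer: the octeon_soft_command struct itself, then the optional
 * context area, then the data and response areas, each starting on a
 * 128-byte boundary; the last 8 bytes of the response area hold the
 * completion status word.
 */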
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
                                                      u32 datasize,
                                                      u32 rdatasize,
                                                      u32 ctxsize)
{
        u64 dma_addr;
        u32 size;
        u32 offset = sizeof(struct octeon_soft_command);
        struct octeon_soft_command *sc;

        if (!rdatasize)
                rdatasize = 16;

        WARN_ON((offset + datasize + rdatasize + ctxsize) >
                SOFT_COMMAND_BUFFER_SIZE);

        spin_lock_bh(&oct->sc_buf_pool.lock);

        if (list_empty(&oct->sc_buf_pool.head)) {
                spin_unlock_bh(&oct->sc_buf_pool.lock);
                return NULL;
        }

        sc = list_first_entry(&oct->sc_buf_pool.head,
                              struct octeon_soft_command, node);
        list_del(&sc->node);

        atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

        spin_unlock_bh(&oct->sc_buf_pool.lock);

        dma_addr = sc->dma_addr;
        size = sc->size;

        memset(sc, 0, size);

        sc->dma_addr = dma_addr;
        sc->size = size;

        if (ctxsize) {
                sc->ctxptr = (u8 *)sc + offset;
                sc->ctxsize = ctxsize;
        }

        /* Start data at 128 byte boundary */
        offset = (offset + ctxsize + 127) & 0xffffff80;

        if (datasize) {
                sc->virtdptr = (u8 *)sc + offset;
                sc->dmadptr = dma_addr + offset;
                sc->datasize = datasize;
        }

        /* Start rdata at 128 byte boundary */
        offset = (offset + datasize + 127) & 0xffffff80;

        if (rdatasize) {
                WARN_ON(rdatasize < 16);
                sc->virtrptr = (u8 *)sc + offset;
                sc->dmarptr = dma_addr + offset;
                sc->rdatasize = rdatasize;
                sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
        }

        return sc;
}

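/* Return a soft-command buffer to the pool. */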
void octeon_free_soft_command(struct octeon_device *oct,
                              struct octeon_soft_command *sc)
{
        spin_lock_bh(&oct->sc_buf_pool.lock);

        list_add_tail(&sc->node, &oct->sc_buf_pool.head);

        atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

        spin_unlock_bh(&oct->sc_buf_pool.lock);
}
