root/drivers/crypto/qat/qat_common/adf_sriov.c


DEFINITIONS

This source file includes the following definitions:
  1. adf_iov_send_resp
  2. adf_vf2pf_bh_handler
  3. adf_enable_sriov
  4. adf_disable_sriov
  5. adf_sriov_configure
  6. adf_init_pf_wq
  7. adf_exit_pf_wq

/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2015 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2015 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_pf2vf_msg.h"

static struct workqueue_struct *pf2vf_resp_wq;

#define ME2FUNCTION_MAP_A_OFFSET        (0x3A400 + 0x190)
#define ME2FUNCTION_MAP_A_NUM_REGS      96

#define ME2FUNCTION_MAP_B_OFFSET        (0x3A400 + 0x310)
#define ME2FUNCTION_MAP_B_NUM_REGS      12

#define ME2FUNCTION_MAP_REG_SIZE        4
#define ME2FUNCTION_MAP_VALID           BIT(7)

#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index)               \
        ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +           \
                   ME2FUNCTION_MAP_REG_SIZE * (index))

#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value)       \
        ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +           \
                   ME2FUNCTION_MAP_REG_SIZE * (index), value)

#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index)               \
        ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +           \
                   ME2FUNCTION_MAP_REG_SIZE * (index))

#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value)       \
        ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +           \
                   ME2FUNCTION_MAP_REG_SIZE * (index), value)
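
/*
 * The ME2FUNCTION map registers route Micro Engine (ME) threads to PCIe
 * functions.  adf_enable_sriov() and adf_disable_sriov() below use the
 * accessors above only to toggle ME2FUNCTION_MAP_VALID (BIT(7)) with a
 * read-modify-write of every entry in both groups.
 */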

struct adf_pf2vf_resp {
        struct work_struct pf2vf_resp_work;
        struct adf_accel_vf_info *vf_info;
};

static void adf_iov_send_resp(struct work_struct *work)
{
        struct adf_pf2vf_resp *pf2vf_resp =
                container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);

        adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
        kfree(pf2vf_resp);
}

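/*
 * Bottom half of the VF2PF interrupt: tasklets run in softirq context and
 * must not sleep, so the request is wrapped in an adf_pf2vf_resp work item
 * (allocated with GFP_ATOMIC) and punted to pf2vf_resp_wq, where
 * adf_iov_send_resp() handles it in process context and frees the wrapper.
 */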
static void adf_vf2pf_bh_handler(void *data)
{
        struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
        struct adf_pf2vf_resp *pf2vf_resp;

        pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
        if (!pf2vf_resp)
                return;

        pf2vf_resp->vf_info = vf_info;
        INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
        queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
}

static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
{
        struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
        int totalvfs = pci_sriov_get_totalvfs(pdev);
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
                        &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        struct adf_accel_vf_info *vf_info;
        int i;
        u32 reg;

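        /*
         * Per-VF state: a tasklet for handling the VF2PF interrupt, a lock
         * serializing PF2VF messages, and a rate limiter applied to the
         * VF2PF traffic from that VF.
         */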
        for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
             i++, vf_info++) {
                /* Back-reference used by the handlers once VFs are created */
                vf_info->accel_dev = accel_dev;
                vf_info->vf_nr = i;

                tasklet_init(&vf_info->vf2pf_bh_tasklet,
                             (void *)adf_vf2pf_bh_handler,
                             (unsigned long)vf_info);
                mutex_init(&vf_info->pf2vf_lock);
                ratelimit_state_init(&vf_info->vf2pf_ratelimit,
                                     DEFAULT_RATELIMIT_INTERVAL,
                                     DEFAULT_RATELIMIT_BURST);
        }

        /* Set Valid bits in ME Thread to PCIe Function Mapping Group A */
        for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
                reg |= ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
        }

        /* Set Valid bits in ME Thread to PCIe Function Mapping Group B */
        for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
                reg |= ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
        }

        /*
         * Enable VF to PF interrupts for all VFs; GENMASK_ULL(totalvfs - 1, 0)
         * builds a mask with one interrupt-enable bit per supported VF.
         */
        adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));

        /*
         * Due to the hardware design, when SR-IOV and the ring arbiter
         * are enabled all the VFs supported in hardware must be enabled in
         * order for all the hardware resources (i.e. bundles) to be usable.
         * When SR-IOV is enabled, each of the VFs will own one bundle.
         */
        return pci_enable_sriov(pdev, totalvfs);
}

/**
 * adf_disable_sriov() - Disable SRIOV for the device
 * @accel_dev:  Pointer to accel device.
 *
 * Function disables SRIOV for the accel device.
 *
 * Return: void
 */
void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *pmisc =
                        &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        void __iomem *pmisc_addr = pmisc->virt_addr;
        int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
        struct adf_accel_vf_info *vf;
        u32 reg;
        int i;

        if (!accel_dev->pf.vf_info)
                return;

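        /*
         * Notify the VFs that the PF is going down before SR-IOV is torn
         * down so the VF drivers get a chance to quiesce.
         */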
        adf_pf2vf_notify_restarting(accel_dev);

        pci_disable_sriov(accel_to_pci_dev(accel_dev));

        /* Disable VF to PF interrupts */
        adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);

        /* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */
        for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
                reg &= ~ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
        }

        /* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */
        for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
                reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
                reg &= ~ME2FUNCTION_MAP_VALID;
                WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
        }

        for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
                tasklet_disable(&vf->vf2pf_bh_tasklet);
                tasklet_kill(&vf->vf2pf_bh_tasklet);
                mutex_destroy(&vf->pf2vf_lock);
        }

        kfree(accel_dev->pf.vf_info);
        accel_dev->pf.vf_info = NULL;
}
EXPORT_SYMBOL_GPL(adf_disable_sriov);

/**
 * adf_sriov_configure() - Enable SRIOV for the device
 * @pdev:  Pointer to PCI device.
 * @numvfs: Number of virtual functions requested.
 *
 * Function enables SRIOV for the PCI device.
 *
 * Return: number of VFs enabled on success, error code otherwise.
 */
int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
        struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
        int totalvfs = pci_sriov_get_totalvfs(pdev);
        unsigned long val;
        int ret;

        if (!accel_dev) {
                dev_err(&pdev->dev, "Failed to find accel_dev\n");
                return -EFAULT;
        }

        if (!iommu_present(&pci_bus_type))
                dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n");

        if (accel_dev->pf.vf_info) {
                dev_info(&pdev->dev, "Already enabled for this device\n");
                return -EINVAL;
        }

        if (adf_dev_started(accel_dev)) {
                if (adf_devmgr_in_reset(accel_dev) ||
                    adf_dev_in_use(accel_dev)) {
                        dev_err(&GET_DEV(accel_dev), "Device busy\n");
                        return -EBUSY;
                }

                adf_dev_stop(accel_dev);
                adf_dev_shutdown(accel_dev);
        }

        if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
                return -EFAULT;
        val = 0;
        if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                        ADF_NUM_CY, (void *)&val, ADF_DEC))
                return -EFAULT;

        set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);

        /* Allocate memory for VF info structs */
        accel_dev->pf.vf_info = kcalloc(totalvfs,
                                        sizeof(struct adf_accel_vf_info),
                                        GFP_KERNEL);
        if (!accel_dev->pf.vf_info)
                return -ENOMEM;

        if (adf_dev_init(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
                        accel_dev->accel_id);
                return -EFAULT;
        }

        if (adf_dev_start(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
                        accel_dev->accel_id);
                return -EFAULT;
        }

        ret = adf_enable_sriov(accel_dev);
        if (ret)
                return ret;

        return numvfs;
}
EXPORT_SYMBOL_GPL(adf_sriov_configure);
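
/*
 * adf_sriov_configure() is wired up as the PCI driver's .sriov_configure
 * callback, so the PCI core invokes it when userspace writes a VF count to
 * the device's sriov_numvfs attribute, e.g. (illustrative device address):
 *
 *   echo 16 > /sys/bus/pci/devices/0000:3d:00.0/sriov_numvfs
 *
 * Note that, per the hardware constraint described in adf_enable_sriov(),
 * the driver always enables all the VFs the hardware supports.
 */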

int __init adf_init_pf_wq(void)
{
        /* Workqueue for PF2VF responses */
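        /*
         * WQ_MEM_RECLAIM gives the queue a rescuer thread so queued
         * responses can still make progress under memory pressure.
         */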
        pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);

        return !pf2vf_resp_wq ? -ENOMEM : 0;
}

void adf_exit_pf_wq(void)
{
        if (pf2vf_resp_wq) {
                destroy_workqueue(pf2vf_resp_wq);
                pf2vf_resp_wq = NULL;
        }
}
