/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31 */ 32 33#include <linux/kernel.h> 34#include <linux/module.h> 35#include <linux/hardirq.h> 36#include <linux/mlx5/driver.h> 37#include <linux/mlx5/cmd.h> 38#include <rdma/ib_verbs.h> 39#include <linux/mlx5/cq.h> 40#include "mlx5_core.h" 41 42void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) 43{ 44 struct mlx5_core_cq *cq; 45 struct mlx5_cq_table *table = &dev->priv.cq_table; 46 47 spin_lock(&table->lock); 48 cq = radix_tree_lookup(&table->tree, cqn); 49 if (likely(cq)) 50 atomic_inc(&cq->refcount); 51 spin_unlock(&table->lock); 52 53 if (!cq) { 54 mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn); 55 return; 56 } 57 58 ++cq->arm_sn; 59 60 cq->comp(cq); 61 62 if (atomic_dec_and_test(&cq->refcount)) 63 complete(&cq->free); 64} 65 66void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) 67{ 68 struct mlx5_cq_table *table = &dev->priv.cq_table; 69 struct mlx5_core_cq *cq; 70 71 spin_lock(&table->lock); 72 73 cq = radix_tree_lookup(&table->tree, cqn); 74 if (cq) 75 atomic_inc(&cq->refcount); 76 77 spin_unlock(&table->lock); 78 79 if (!cq) { 80 mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn); 81 return; 82 } 83 84 cq->event(cq, event_type); 85 86 if (atomic_dec_and_test(&cq->refcount)) 87 complete(&cq->free); 88} 89 90 91int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 92 struct mlx5_create_cq_mbox_in *in, int inlen) 93{ 94 int err; 95 struct mlx5_cq_table *table = &dev->priv.cq_table; 96 struct mlx5_create_cq_mbox_out out; 97 struct mlx5_destroy_cq_mbox_in din; 98 struct mlx5_destroy_cq_mbox_out dout; 99 100 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ); 101 memset(&out, 0, sizeof(out)); 102 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 103 if (err) 104 return err; 105 106 if (out.hdr.status) 107 return mlx5_cmd_status_to_err(&out.hdr); 108 109 cq->cqn = be32_to_cpu(out.cqn) & 0xffffff; 110 cq->cons_index = 0; 111 cq->arm_sn = 0; 112 atomic_set(&cq->refcount, 1); 113 
init_completion(&cq->free); 114 115 spin_lock_irq(&table->lock); 116 err = radix_tree_insert(&table->tree, cq->cqn, cq); 117 spin_unlock_irq(&table->lock); 118 if (err) 119 goto err_cmd; 120 121 cq->pid = current->pid; 122 err = mlx5_debug_cq_add(dev, cq); 123 if (err) 124 mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", 125 cq->cqn); 126 127 return 0; 128 129err_cmd: 130 memset(&din, 0, sizeof(din)); 131 memset(&dout, 0, sizeof(dout)); 132 din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); 133 mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); 134 return err; 135} 136EXPORT_SYMBOL(mlx5_core_create_cq); 137 138int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) 139{ 140 struct mlx5_cq_table *table = &dev->priv.cq_table; 141 struct mlx5_destroy_cq_mbox_in in; 142 struct mlx5_destroy_cq_mbox_out out; 143 struct mlx5_core_cq *tmp; 144 int err; 145 146 spin_lock_irq(&table->lock); 147 tmp = radix_tree_delete(&table->tree, cq->cqn); 148 spin_unlock_irq(&table->lock); 149 if (!tmp) { 150 mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); 151 return -EINVAL; 152 } 153 if (tmp != cq) { 154 mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn); 155 return -EINVAL; 156 } 157 158 memset(&in, 0, sizeof(in)); 159 memset(&out, 0, sizeof(out)); 160 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ); 161 in.cqn = cpu_to_be32(cq->cqn); 162 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 163 if (err) 164 return err; 165 166 if (out.hdr.status) 167 return mlx5_cmd_status_to_err(&out.hdr); 168 169 synchronize_irq(cq->irqn); 170 171 mlx5_debug_cq_remove(dev, cq); 172 if (atomic_dec_and_test(&cq->refcount)) 173 complete(&cq->free); 174 wait_for_completion(&cq->free); 175 176 return 0; 177} 178EXPORT_SYMBOL(mlx5_core_destroy_cq); 179 180int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 181 struct mlx5_query_cq_mbox_out *out) 182{ 183 struct mlx5_query_cq_mbox_in in; 184 int err; 
185 186 memset(&in, 0, sizeof(in)); 187 memset(out, 0, sizeof(*out)); 188 189 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ); 190 in.cqn = cpu_to_be32(cq->cqn); 191 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); 192 if (err) 193 return err; 194 195 if (out->hdr.status) 196 return mlx5_cmd_status_to_err(&out->hdr); 197 198 return err; 199} 200EXPORT_SYMBOL(mlx5_core_query_cq); 201 202 203int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 204 struct mlx5_modify_cq_mbox_in *in, int in_sz) 205{ 206 struct mlx5_modify_cq_mbox_out out; 207 int err; 208 209 memset(&out, 0, sizeof(out)); 210 in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ); 211 err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out)); 212 if (err) 213 return err; 214 215 if (out.hdr.status) 216 return mlx5_cmd_status_to_err(&out.hdr); 217 218 return 0; 219} 220EXPORT_SYMBOL(mlx5_core_modify_cq); 221 222int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, 223 struct mlx5_core_cq *cq, 224 u16 cq_period, 225 u16 cq_max_count) 226{ 227 struct mlx5_modify_cq_mbox_in in; 228 229 memset(&in, 0, sizeof(in)); 230 231 in.cqn = cpu_to_be32(cq->cqn); 232 in.ctx.cq_period = cpu_to_be16(cq_period); 233 in.ctx.cq_max_count = cpu_to_be16(cq_max_count); 234 in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD | 235 MLX5_CQ_MODIFY_COUNT); 236 237 return mlx5_core_modify_cq(dev, cq, &in, sizeof(in)); 238} 239 240int mlx5_init_cq_table(struct mlx5_core_dev *dev) 241{ 242 struct mlx5_cq_table *table = &dev->priv.cq_table; 243 int err; 244 245 memset(table, 0, sizeof(*table)); 246 spin_lock_init(&table->lock); 247 INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); 248 err = mlx5_cq_debugfs_init(dev); 249 250 return err; 251} 252 253void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev) 254{ 255 mlx5_cq_debugfs_cleanup(dev); 256} 257