/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/rculist.h>

#include "qib.h"

/**
 * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
{
	struct qib_mcast_qp *mqp;

	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
	atomic_inc(&qp->refcount);

bail:
	return mqp;
}

static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)
{
	struct qib_qp *qp = mqp->qp;

	/* Notify qib_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	kfree(mqp);
}

/**
 * qib_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
{
	struct qib_mcast *mcast;

	mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mgid = *mgid;
	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);
	mcast->n_attached = 0;

bail:
	return mcast;
}

static void qib_mcast_free(struct qib_mcast *mcast)
{
	struct qib_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		qib_mcast_qp_free(p);

	kfree(mcast);
}
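
/*
 * Lifetime notes (a sketch of the expected caller pattern, not part of
 * the original interface documentation): mcast->refcount holds one
 * reference for membership in the port's rb-tree plus one per transient
 * lookup.  A reader that obtains an entry via qib_mcast_find() must
 * drop its reference when done and wake any waiter, e.g.:
 *
 *	mcast = qib_mcast_find(ibp, mgid);
 *	if (mcast != NULL) {
 *		... deliver the packet to each QP on mcast->qp_list ...
 *		if (atomic_dec_return(&mcast->refcount) <= 1)
 *			wake_up(&mcast->wait);
 *	}
 *
 * qib_multicast_detach() below waits on mcast->wait for the count to
 * drain before freeing, so a leaked reference would block detach.
 */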
/**
 * qib_mcast_find - search the global table for the given multicast GID
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid)
{
	struct rb_node *n;
	unsigned long flags;
	struct qib_mcast *mcast;

	spin_lock_irqsave(&ibp->lock, flags);
	n = ibp->mcast_tree.rb_node;
	while (n) {
		int ret;

		mcast = rb_entry(n, struct qib_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else {
			atomic_inc(&mcast->refcount);
			spin_unlock_irqrestore(&ibp->lock, flags);
			goto bail;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	mcast = NULL;

bail:
	return mcast;
}

/**
 * qib_mcast_add - insert mcast GID into table and attach QP struct
 * @dev: the device structure
 * @ibp: the IB port structure
 * @mcast: the mcast GID table
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.
 */
static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp,
			 struct qib_mcast *mcast, struct qib_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&ibp->lock);

	while (*n) {
		struct qib_mcast *tmcast;
		struct qib_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct qib_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	spin_lock(&dev->n_mcast_grps_lock);
	if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) {
		spin_unlock(&dev->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	dev->n_mcast_grps_allocated++;
	spin_unlock(&dev->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}
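
/*
 * Note on the return convention above: qib_mcast_add() deliberately
 * returns *positive* ESRCH/EEXIST/ENOMEM values.  They are internal
 * status codes consumed by qib_multicast_attach() below to decide
 * which of the speculatively allocated structures to free; only
 * ENOMEM is translated into a negative errno for the caller.  The
 * lock ordering is ibp->lock (outer, irq-disabling) then
 * dev->n_mcast_grps_lock (inner), so the device-wide group count can
 * be checked without dropping the per-port tree lock.
 */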
int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_ibport *ibp;
	struct qib_mcast *mcast;
	struct qib_mcast_qp *mqp;
	int ret;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Allocate data structures since it's better to do this outside of
	 * spin locks and it will most likely be needed.
	 */
	mcast = qib_mcast_alloc(gid);
	if (mcast == NULL) {
		ret = -ENOMEM;
		goto bail;
	}
	mqp = qib_mcast_qp_alloc(qp);
	if (mqp == NULL) {
		qib_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;
	}
	ibp = to_iport(ibqp->device, qp->port_num);
	switch (qib_mcast_add(dev, ibp, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: OK to attach the same QP twice. */
		qib_mcast_qp_free(mqp);
		qib_mcast_free(mcast);
		break;

	case EEXIST:		/* The mcast wasn't used */
		qib_mcast_free(mcast);
		break;

	case ENOMEM:
		/* Exceeded the maximum number of mcast groups. */
		qib_mcast_qp_free(mqp);
		qib_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;

	default:
		break;
	}

	ret = 0;

bail:
	return ret;
}

int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
	struct qib_mcast *mcast = NULL;
	struct qib_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	spin_lock_irq(&ibp->lock);

	/* Find the GID in the mcast table. */
	n = ibp->mcast_tree.rb_node;
	while (1) {
		if (n == NULL) {
			spin_unlock_irq(&ibp->lock);
			return -EINVAL;
		}

		mcast = rb_entry(n, struct qib_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			break;
	}

	/* Search the QP list. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&ibp->lock);
	/* QP not attached */
	if (!delp)
		return -EINVAL;

	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	qib_mcast_qp_free(delp);

	if (last) {
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		qib_mcast_free(mcast);
		spin_lock_irq(&dev->n_mcast_grps_lock);
		dev->n_mcast_grps_allocated--;
		spin_unlock_irq(&dev->n_mcast_grps_lock);
	}
	return 0;
}

int qib_mcast_tree_empty(struct qib_ibport *ibp)
{
	return ibp->mcast_tree.rb_node == NULL;
}
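
/*
 * A minimal sketch of how these entry points are expected to be wired
 * up; the actual registration lives elsewhere in the driver (not in
 * this file), and the snippet below is illustrative, assuming the
 * classic ib_device callback fields of this kernel era:
 *
 *	struct ib_device *ibdev = &dev->ibdev;
 *
 *	ibdev->attach_mcast = qib_multicast_attach;
 *	ibdev->detach_mcast = qib_multicast_detach;
 *
 * The verbs core then invokes these on ib_attach_mcast()/
 * ib_detach_mcast() requests, and qib_mcast_tree_empty() lets the
 * driver check at port teardown that every group has been released.
 */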