/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "rds.h"
#include "iw.h"


/*
 * This is stored as mr->r_trans_private.
 */
struct rds_iw_mr {
	struct rds_iw_device	*device;
	struct rds_iw_mr_pool	*pool;
	struct rdma_cm_id	*cm_id;

	struct ib_mr		*mr;
	struct ib_fast_reg_page_list *page_list;

	struct rds_iw_mapping	mapping;
	unsigned char		remap_count;
};

/*
 * Our own little MR pool
 */
struct rds_iw_mr_pool {
	struct rds_iw_device	*device;		/* back ptr to the device that owns us */

	struct mutex		flush_lock;		/* serialize fmr invalidate */
	struct work_struct	flush_worker;		/* flush worker */

	spinlock_t		list_lock;		/* protect variables below */
	atomic_t		item_count;		/* total # of MRs */
	atomic_t		dirty_count;		/* # of dirty MRs */
	struct list_head	dirty_list;		/* dirty mappings */
	struct list_head	clean_list;		/* unused & unmapped MRs */
	atomic_t		free_pinned;		/* memory pinned by free MRs */
	unsigned long		max_message_size;	/* in pages */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	int			max_pages;
};

static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all);
static void rds_iw_mr_pool_flush_worker(struct work_struct *work);
static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
			      struct rds_iw_mr *ibmr,
			      struct scatterlist *sg, unsigned int nents);
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
			struct list_head *unmap_list,
			struct list_head *kill_list,
			int *unpinned);
static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
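
/*
 * Look up the rds_iw_device and rdma_cm_id for a given source/destination
 * address pair by walking rds_iw_devices and each device's cm_id_list.
 * Returns 0 and fills in *rds_iwdev and *cm_id on a match, 1 otherwise.
 * Until WORKING_TUPLE_DETECTION is available, only the local address is
 * compared; see the FIXME below.
 */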
static int rds_iw_get_device(struct sockaddr_in *src, struct sockaddr_in *dst,
			     struct rds_iw_device **rds_iwdev,
			     struct rdma_cm_id **cm_id)
{
	struct rds_iw_device *iwdev;
	struct rds_iw_cm_id *i_cm_id;

	*rds_iwdev = NULL;
	*cm_id = NULL;

	list_for_each_entry(iwdev, &rds_iw_devices, list) {
		spin_lock_irq(&iwdev->spinlock);
		list_for_each_entry(i_cm_id, &iwdev->cm_id_list, list) {
			struct sockaddr_in *src_addr, *dst_addr;

			src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
			dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;

			rdsdebug("local ipaddr = %x port %d, "
				 "remote ipaddr = %x port %d"
				 "..looking for %x port %d, "
				 "remote ipaddr = %x port %d\n",
				 src_addr->sin_addr.s_addr,
				 src_addr->sin_port,
				 dst_addr->sin_addr.s_addr,
				 dst_addr->sin_port,
				 src->sin_addr.s_addr,
				 src->sin_port,
				 dst->sin_addr.s_addr,
				 dst->sin_port);
#ifdef WORKING_TUPLE_DETECTION
			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr &&
			    src_addr->sin_port == src->sin_port &&
			    dst_addr->sin_addr.s_addr == dst->sin_addr.s_addr &&
			    dst_addr->sin_port == dst->sin_port) {
#else
			/* FIXME - needs to compare the local and remote
			 * ipaddr/port tuple, but the ipaddr is the only
			 * available information in the rds_sock (as the rest
			 * are zero'ed).  It doesn't appear to be properly
			 * populated during connection setup...
			 */
			if (src_addr->sin_addr.s_addr == src->sin_addr.s_addr) {
#endif
				spin_unlock_irq(&iwdev->spinlock);
				*rds_iwdev = iwdev;
				*cm_id = i_cm_id->cm_id;
				return 0;
			}
		}
		spin_unlock_irq(&iwdev->spinlock);
	}

	return 1;
}

static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	i_cm_id = kmalloc(sizeof *i_cm_id, GFP_KERNEL);
	if (!i_cm_id)
		return -ENOMEM;

	i_cm_id->cm_id = cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_add_tail(&i_cm_id->list, &rds_iwdev->cm_id_list);
	spin_unlock_irq(&rds_iwdev->spinlock);

	return 0;
}

static void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev,
				struct rdma_cm_id *cm_id)
{
	struct rds_iw_cm_id *i_cm_id;

	spin_lock_irq(&rds_iwdev->spinlock);
	list_for_each_entry(i_cm_id, &rds_iwdev->cm_id_list, list) {
		if (i_cm_id->cm_id == cm_id) {
			list_del(&i_cm_id->list);
			kfree(i_cm_id);
			break;
		}
	}
	spin_unlock_irq(&rds_iwdev->spinlock);
}


int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id)
{
	struct sockaddr_in *src_addr, *dst_addr;
	struct rds_iw_device *rds_iwdev_old;
	struct rdma_cm_id *pcm_id;
	int rc;

	src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
	dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;

	rc = rds_iw_get_device(src_addr, dst_addr, &rds_iwdev_old, &pcm_id);
	if (rc)
		rds_iw_remove_cm_id(rds_iwdev, cm_id);

	return rds_iw_add_cm_id(rds_iwdev, cm_id);
}
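
/*
 * Connection <-> device association.  A connection starts out on the global
 * iw_nodev_conns list and is moved to its device's conn_list once the device
 * is known.  Note the lock ordering used by both helpers below:
 * iw_nodev_conns_lock is taken first, with rds_iwdev->spinlock nested inside.
 */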
void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&iw_nodev_conns_lock);
	BUG_ON(list_empty(&iw_nodev_conns));
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);

	spin_lock(&rds_iwdev->spinlock);
	list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
	spin_unlock(&rds_iwdev->spinlock);
	spin_unlock_irq(&iw_nodev_conns_lock);

	ic->rds_iwdev = rds_iwdev;
}

void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn)
{
	struct rds_iw_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&iw_nodev_conns_lock);

	spin_lock_irq(&rds_iwdev->spinlock);
	BUG_ON(list_empty(&ic->iw_node));
	list_del(&ic->iw_node);
	spin_unlock_irq(&rds_iwdev->spinlock);

	list_add_tail(&ic->iw_node, &iw_nodev_conns);

	spin_unlock(&iw_nodev_conns_lock);

	rds_iw_remove_cm_id(ic->rds_iwdev, ic->i_cm_id);
	ic->rds_iwdev = NULL;
}

void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_iw_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, iw_node)
		rds_conn_destroy(ic->conn);
}

static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
		struct scatterlist *list, unsigned int sg_len)
{
	sg->list = list;
	sg->len = sg_len;
	sg->dma_len = 0;
	sg->dma_npages = 0;
	sg->bytes = 0;
}
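
/*
 * Map the scatterlist for DMA and flatten it into an array of per-page DMA
 * addresses, which rds_iw_map_fastreg() later copies into the fastreg page
 * list.  sg->bytes and sg->dma_npages are filled in as a side effect.
 * Returns the kmalloc'ed address array, or an ERR_PTR on failure (in which
 * case the scatterlist has already been unmapped again).
 */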
static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
			struct rds_iw_scatterlist *sg)
{
	struct ib_device *dev = rds_iwdev->dev;
	u64 *dma_pages = NULL;
	int i, j, ret;

	WARN_ON(sg->dma_len);

	sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	if (unlikely(!sg->dma_len)) {
		printk(KERN_WARNING "RDS/IW: dma_map_sg failed!\n");
		return ERR_PTR(-EBUSY);
	}

	sg->bytes = 0;
	sg->dma_npages = 0;

	ret = -EINVAL;
	for (i = 0; i < sg->dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
		u64 end_addr;

		sg->bytes += dma_len;

		end_addr = dma_addr + dma_len;
		if (dma_addr & PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			dma_addr &= ~PAGE_MASK;
		}
		if (end_addr & PAGE_MASK) {
			if (i < sg->dma_len - 1)
				goto out_unmap;
			end_addr = (end_addr + PAGE_MASK) & ~PAGE_MASK;
		}

		sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT;
	}

	/* Now gather the dma addrs into one list */
	if (sg->dma_npages > fastreg_message_size)
		goto out_unmap;

	dma_pages = kmalloc(sizeof(u64) * sg->dma_npages, GFP_ATOMIC);
	if (!dma_pages) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	for (i = j = 0; i < sg->dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]);
		u64 end_addr;

		end_addr = dma_addr + dma_len;
		dma_addr &= ~PAGE_MASK;
		for (; dma_addr < end_addr; dma_addr += PAGE_SIZE)
			dma_pages[j++] = dma_addr;
		BUG_ON(j > sg->dma_npages);
	}

	return dma_pages;

out_unmap:
	ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
	sg->dma_len = 0;
	kfree(dma_pages);
	return ERR_PTR(ret);
}


struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		printk(KERN_WARNING "RDS/IW: rds_iw_create_mr_pool alloc error\n");
		return ERR_PTR(-ENOMEM);
	}

	pool->device = rds_iwdev;
	INIT_LIST_HEAD(&pool->dirty_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_iw_mr_pool_flush_worker);

	pool->max_message_size = fastreg_message_size;
	pool->max_items = fastreg_pool_size;
	pool->max_free_pinned = pool->max_items * pool->max_message_size / 4;
	pool->max_pages = fastreg_message_size;

	/* We never allow more than max_items MRs to be allocated.
	 * Once we exceed max_items_soft, we start freeing items more
	 * aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2
	 */
	pool->max_items_soft = pool->max_items * 3 / 4;

	return pool;
}

void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->max_pages;
}

void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_iw_flush_mr_pool(pool, 1);
	BUG_ON(atomic_read(&pool->item_count));
	BUG_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
{
	struct rds_iw_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
		list_del_init(&ibmr->mapping.m_list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}
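
/*
 * Allocate an MR for a new mapping.  Prefer reusing a clean MR from the
 * pool; otherwise allocate a fresh one as long as we stay below
 * pool->max_items.  If the pool is full, flush it and retry, giving up
 * with -EAGAIN after a couple of attempts.
 */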
static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
{
	struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
	struct rds_iw_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_iw_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the
		 * driver tells us we can't use more than N fmrs, we
		 * shouldn't start arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_iw_stats_inc(s_iw_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_iw_stats_inc(s_iw_rdma_mr_pool_wait);
		rds_iw_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	spin_lock_init(&ibmr->mapping.m_lock);
	INIT_LIST_HEAD(&ibmr->mapping.m_list);
	ibmr->mapping.m_mr = ibmr;

	err = rds_iw_init_fastreg(pool, ibmr);
	if (err)
		goto out_no_cigar;

	rds_iw_stats_inc(s_iw_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
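
/*
 * DMA-sync an MR's scatterlist for CPU or device access, depending on the
 * direction of the data transfer.
 */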
void rds_iw_sync_mr(void *trans_private, int direction)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_device *rds_iwdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
			ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all)
{
	struct rds_iw_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(kill_list);
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, unpinned = 0;
	int ret = 0;

	rds_iw_stats_inc(s_iw_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);
	/* Get the list of all mappings to be destroyed */
	list_splice_init(&pool->dirty_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &kill_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* Batched invalidate of dirty MRs.
	 * For FMR based MRs, the mappings on the unmap list are
	 * actually members of an ibmr (ibmr->mapping). They either
	 * migrate to the kill_list, or have been cleaned and should be
	 * moved to the clean_list.
	 * For fastregs, they will be dynamically allocated, and
	 * will be destroyed by the unmap function.
	 */
	if (!list_empty(&unmap_list)) {
		ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list,
						     &kill_list, &unpinned);
		/* If we've been asked to destroy all MRs, move those
		 * that were simply cleaned to the kill list */
		if (free_all)
			list_splice_init(&unmap_list, &kill_list);
	}

	/* Destroy any MRs that are past their best before date */
	list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
		rds_iw_stats_inc(s_iw_rdma_mr_free);
		list_del(&ibmr->mapping.m_list);
		rds_iw_destroy_fastreg(pool, ibmr);
		kfree(ibmr);
		nfreed++;
	}

	/* Anything that remains are laundered ibmrs, which we can add
	 * back to the clean list.
	 */
	if (!list_empty(&unmap_list)) {
		spin_lock_irqsave(&pool->list_lock, flags);
		list_splice(&unmap_list, &pool->clean_list);
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

	mutex_unlock(&pool->flush_lock);
	return ret;
}

static void rds_iw_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_iw_mr_pool *pool = container_of(work, struct rds_iw_mr_pool, flush_worker);

	rds_iw_flush_mr_pool(pool, 0);
}

void rds_iw_free_mr(void *trans_private, int invalidate)
{
	struct rds_iw_mr *ibmr = trans_private;
	struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

	rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
	if (!pool)
		return;

	/* Return it to the pool's free list */
	rds_iw_free_fastreg(pool, ibmr);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_iw_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}

void rds_iw_flush_mrs(void)
{
	struct rds_iw_device *rds_iwdev;

	list_for_each_entry(rds_iwdev, &rds_iw_devices, list) {
		struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;

		if (pool)
			rds_iw_flush_mr_pool(pool, 0);
	}
}
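
/*
 * Register the pages behind a user mapping with the device.  We look up the
 * device and cm_id from the socket's bound and connected addresses, grab an
 * MR from the pool, fastreg-map the scatterlist and hand back the rkey via
 * *key_ret.  Returns the MR (stored as mr->r_trans_private) or an ERR_PTR.
 */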
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_iw_device *rds_iwdev;
	struct rds_iw_mr *ibmr = NULL;
	struct rdma_cm_id *cm_id;
	struct sockaddr_in src = {
		.sin_addr.s_addr = rs->rs_bound_addr,
		.sin_port = rs->rs_bound_port,
	};
	struct sockaddr_in dst = {
		.sin_addr.s_addr = rs->rs_conn_addr,
		.sin_port = rs->rs_conn_port,
	};
	int ret;

	ret = rds_iw_get_device(&src, &dst, &rds_iwdev, &cm_id);
	if (ret || !cm_id) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_iwdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_iw_alloc_mr(rds_iwdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->cm_id = cm_id;
	ibmr->device = rds_iwdev;

	ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->mr->rkey;
	else
		printk(KERN_WARNING "RDS/IW: failed to map mr (errno=%d)\n", ret);

out:
	if (ret) {
		if (ibmr)
			rds_iw_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}

/*
 * iWARP fastreg handling
 *
 * The life cycle of a fastreg registration is a bit different from that of
 * FMRs.
 * The idea behind fastreg is to have one MR, to which we bind different
 * mappings over time. To avoid stalling on the expensive map and invalidate
 * operations, these operations are pipelined on the same send queue on
 * which we want to send the message containing the r_key.
 *
 * This creates a bit of a problem for us, as we do not have the destination
 * IP in GET_MR, so the connection must be set up prior to the GET_MR call
 * for RDMA to be correctly set up.  If a fastreg request is present,
 * rds_iw_xmit will try to queue a LOCAL_INV (if needed) and a FAST_REG_MR
 * work request before queuing the SEND. When completions for these arrive,
 * they are dispatched to the MR, which has a bit set showing that RDMA can
 * now be performed.
 *
 * There is another interesting aspect that's related to invalidation.
 * The application can request that a mapping is invalidated in FREE_MR.
 * The expectation there is that this invalidation step includes ALL
 * PREVIOUSLY FREED MRs.
 */
static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
			       struct rds_iw_mr *ibmr)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct ib_fast_reg_page_list *page_list = NULL;
	struct ib_mr *mr;
	int err;

	mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);

		printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_mr failed (err=%d)\n", err);
		return err;
	}

	/* FIXME - this is overkill, but mapping->m_sg.dma_len/mapping->m_sg.dma_npages
	 * is not filled in.
	 */
	page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev, pool->max_message_size);
	if (IS_ERR(page_list)) {
		err = PTR_ERR(page_list);

		printk(KERN_WARNING "RDS/IW: ib_alloc_fast_reg_page_list failed (err=%d)\n", err);
		ib_dereg_mr(mr);
		return err;
	}

	ibmr->page_list = page_list;
	ibmr->mr = mr;
	return 0;
}
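
/*
 * Bind the current mapping to the MR by posting a FAST_REG_MR work request
 * on the connection's queue pair.  The rkey is refreshed first via
 * ib_update_fast_reg_key() so that previously handed-out keys cannot be
 * used against the new mapping.
 */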
static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
{
	struct rds_iw_mr *ibmr = mapping->m_mr;
	struct ib_send_wr f_wr, *failed_wr;
	int ret;

	/*
	 * Perform a WR for the fast_reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR. The key used is a rolling 8-bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
	mapping->m_rkey = ibmr->mr->rkey;

	memset(&f_wr, 0, sizeof(f_wr));
	f_wr.wr_id = RDS_IW_FAST_REG_WR_ID;
	f_wr.opcode = IB_WR_FAST_REG_MR;
	f_wr.wr.fast_reg.length = mapping->m_sg.bytes;
	f_wr.wr.fast_reg.rkey = mapping->m_rkey;
	f_wr.wr.fast_reg.page_list = ibmr->page_list;
	f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
	f_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_READ |
				IB_ACCESS_REMOTE_WRITE;
	f_wr.wr.fast_reg.iova_start = 0;
	f_wr.send_flags = IB_SEND_SIGNALED;

	failed_wr = &f_wr;
	ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
	BUG_ON(failed_wr != &f_wr);
	if (ret)
		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
				   __func__, __LINE__, ret);
	return ret;
}

static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
{
	struct ib_send_wr s_wr, *failed_wr;
	int ret = 0;

	if (!ibmr->cm_id->qp || !ibmr->mr)
		goto out;

	memset(&s_wr, 0, sizeof(s_wr));
	s_wr.wr_id = RDS_IW_LOCAL_INV_WR_ID;
	s_wr.opcode = IB_WR_LOCAL_INV;
	s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
	s_wr.send_flags = IB_SEND_SIGNALED;

	failed_wr = &s_wr;
	ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
	if (ret) {
		printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
				   __func__, __LINE__, ret);
		goto out;
	}
out:
	return ret;
}

static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
			struct rds_iw_mr *ibmr,
			struct scatterlist *sg,
			unsigned int sg_len)
{
	struct rds_iw_device *rds_iwdev = pool->device;
	struct rds_iw_mapping *mapping = &ibmr->mapping;
	u64 *dma_pages;
	int i, ret = 0;

	rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);

	dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
	if (IS_ERR(dma_pages)) {
		ret = PTR_ERR(dma_pages);
		dma_pages = NULL;
		goto out;
	}

	if (mapping->m_sg.dma_len > pool->max_message_size) {
		ret = -EMSGSIZE;
		goto out;
	}

	for (i = 0; i < mapping->m_sg.dma_npages; ++i)
		ibmr->page_list->page_list[i] = dma_pages[i];

	ret = rds_iw_rdma_build_fastreg(mapping);
	if (ret)
		goto out;

	rds_iw_stats_inc(s_iw_rdma_mr_used);

out:
	kfree(dma_pages);

	return ret;
}

/*
 * "Free" a fastreg MR.
 */
static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
		struct rds_iw_mr *ibmr)
{
	unsigned long flags;
	int ret;

	if (!ibmr->mapping.m_sg.dma_len)
		return;

	ret = rds_iw_rdma_fastreg_inv(ibmr);
	if (ret)
		return;

	/* The LOCAL_INV WR has been posted; move the mapping to the
	 * dirty list and account for the pages it still pins. */
	spin_lock_irqsave(&pool->list_lock, flags);

	list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
	atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	spin_unlock_irqrestore(&pool->list_lock, flags);
}
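
/*
 * Process the mappings on unmap_list during a pool flush: count them as
 * cleaned, accumulate the number of pages they pinned in *unpinned, and
 * leave them on unmap_list for the caller to either kill or move back to
 * the clean list.
 */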
static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool,
				struct list_head *unmap_list,
				struct list_head *kill_list,
				int *unpinned)
{
	struct rds_iw_mapping *mapping, *next;
	unsigned int ncleaned = 0;
	LIST_HEAD(laundered);

	/* Batched invalidation of fastreg MRs.
	 * Why do we do it this way, even though we could pipeline unmap
	 * and remap? The reason is the application semantics - when the
	 * application requests an invalidation of MRs, it expects all
	 * previously released R_Keys to become invalid.
	 *
	 * If we implement MR reuse naively, we risk memory corruption
	 * (this has actually been observed). So the default behavior
	 * requires that a MR goes through an explicit unmap operation before
	 * we can reuse it again.
	 *
	 * We could probably improve on this a little, by allowing immediate
	 * reuse of a MR on the same socket (e.g. you could add a small
	 * cache of unused MRs to struct rds_sock - GET_MR could grab one
	 * of these without requiring an explicit invalidate).
	 */
	while (!list_empty(unmap_list)) {
		unsigned long flags;

		spin_lock_irqsave(&pool->list_lock, flags);
		list_for_each_entry_safe(mapping, next, unmap_list, m_list) {
			*unpinned += mapping->m_sg.len;
			list_move(&mapping->m_list, &laundered);
			ncleaned++;
		}
		spin_unlock_irqrestore(&pool->list_lock, flags);
	}

	/* Move all laundered mappings back to the unmap list.
	 * We do not kill any WRs right now - it doesn't seem the
	 * fastreg API has a max_remap limit. */
	list_splice_init(&laundered, unmap_list);

	return ncleaned;
}

static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
		struct rds_iw_mr *ibmr)
{
	if (ibmr->page_list)
		ib_free_fast_reg_page_list(ibmr->page_list);
	if (ibmr->mr)
		ib_dereg_mr(ibmr->mr);
}