root/net/rds/cong.c

DEFINITIONS

This source file includes the following definitions.
  1. rds_cong_tree_walk
  2. rds_cong_from_addr
  3. rds_cong_add_conn
  4. rds_cong_remove_conn
  5. rds_cong_get_maps
  6. rds_cong_queue_updates
  7. rds_cong_map_updated
  8. rds_cong_updated_since
  9. rds_cong_set_bit
  10. rds_cong_clear_bit
  11. rds_cong_test_bit
  12. rds_cong_add_socket
  13. rds_cong_remove_socket
  14. rds_cong_wait
  15. rds_cong_exit
  16. rds_cong_update_alloc

/*
 * Copyright (c) 2007, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/bitops.h>
#include <linux/export.h>

#include "rds.h"

/*
 * This file implements the receive side of the unconventional congestion
 * management in RDS.
 *
 * Messages waiting in the receive queue on the receiving socket are accounted
 * against the socket's SO_RCVBUF option value.  Only the payload bytes in the
 * message are accounted for.  If the number of bytes queued equals or exceeds
 * rcvbuf then the socket is congested.  All sends attempted to this socket's
 * address should block or return -EWOULDBLOCK.
 *
 * Applications are expected to be reasonably tuned such that this situation
 * very rarely occurs.  Encountering this "back-pressure" is considered an
 * application bug.
 *
 * This is implemented by having each node maintain bitmaps which indicate
 * which ports on bound addresses are congested.  As the bitmap changes it is
 * sent through all the connections which terminate in the local address of the
 * bitmap which changed.
 *
 * The bitmaps are allocated as connections are brought up.  This avoids
 * allocation in the interrupt handling path which queues messages on sockets.
 * The dense bitmaps let transports send the entire bitmap on any bitmap change
 * reasonably efficiently.  This is much easier to implement than some
 * finer-grained communication of per-port congestion.  The sender does a very
 * inexpensive bit test to check whether the port it's about to send to is
 * congested or not.
 */
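
/*
 * For illustration (a sketch based on the RDS_CONG_MAP_* constants in
 * rds.h, not code from this file): one bit per port for all 65536 ports
 * makes the map 8 KiB, split across RDS_CONG_MAP_PAGES pages, and a port
 * maps to a (page, bit) pair as
 *
 *      i   = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 *      off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 *
 * which is exactly the calculation rds_cong_set_bit() and friends
 * perform below.
 */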

/*
 * Interaction with poll is a tad tricky. We want all processes stuck in
 * poll to wake up and check whether a congested destination became uncongested.
 * The really sad thing is we have no idea which destinations the application
 * wants to send to - we don't even know which rds_connections are involved.
 * So until we implement a more flexible rds poll interface, we have to make
 * do with this:
 * We maintain a global counter that is incremented each time a congestion map
 * update is received. Each rds socket tracks this value, and if rds_poll
 * finds that the saved generation number is smaller than the global generation
 * number, it wakes up the process.
 */
static atomic_t         rds_cong_generation = ATOMIC_INIT(0);

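/*
 * A minimal sketch of the consumer side (rds_poll() lives in af_rds.c;
 * the field name rs_cong_track is assumed here for illustration):
 *
 *      if (rds_cong_updated_since(&rs->rs_cong_track))
 *              mask |= (EPOLLIN | EPOLLRDNORM | EPOLLWRBAND);
 */
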
/*
 * Congestion monitoring
 */
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);

/*
 * Yes, a global lock.  It's used so infrequently that it's worth keeping it
 * global to simplify the locking.  It's only used in the following
 * circumstances:
 *
 *  - on connection buildup to associate a conn with its maps
 *  - on map changes to inform conns of a new map to send
 *
 *  It's sadly ordered under the socket callback lock and the connection lock.
 *  Receive paths can mark ports congested from interrupt context so the
 *  lock masks interrupts.
 */
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;

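/*
 * Look up the map for @addr in the global tree.  If no map exists and
 * @insert is non-NULL, link @insert into the tree and return NULL.
 * Callers must hold rds_cong_lock.
 */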
static struct rds_cong_map *rds_cong_tree_walk(const struct in6_addr *addr,
                                               struct rds_cong_map *insert)
{
        struct rb_node **p = &rds_cong_tree.rb_node;
        struct rb_node *parent = NULL;
        struct rds_cong_map *map;

        while (*p) {
                int diff;

                parent = *p;
                map = rb_entry(parent, struct rds_cong_map, m_rb_node);

                diff = rds_addr_cmp(addr, &map->m_addr);
                if (diff < 0)
                        p = &(*p)->rb_left;
                else if (diff > 0)
                        p = &(*p)->rb_right;
                else
                        return map;
        }

        if (insert) {
                rb_link_node(&insert->m_rb_node, parent, p);
                rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
        }
        return NULL;
}

/*
 * There is only ever one bitmap for any address.  Connections allocate
 * these bitmaps as they are brought up and, in the process, get pointers
 * to them.  The bitmaps are only ever freed as the module is removed
 * after all connections have been freed.
 */
static struct rds_cong_map *rds_cong_from_addr(const struct in6_addr *addr)
{
        struct rds_cong_map *map;
        struct rds_cong_map *ret = NULL;
        unsigned long zp;
        unsigned long i;
        unsigned long flags;

        map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
        if (!map)
                return NULL;

        map->m_addr = *addr;
        init_waitqueue_head(&map->m_waitq);
        INIT_LIST_HEAD(&map->m_conn_list);

        for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
                zp = get_zeroed_page(GFP_KERNEL);
                if (zp == 0)
                        goto out;
                map->m_page_addrs[i] = zp;
        }

        spin_lock_irqsave(&rds_cong_lock, flags);
        ret = rds_cong_tree_walk(addr, map);
        spin_unlock_irqrestore(&rds_cong_lock, flags);

        if (!ret) {
                ret = map;
                map = NULL;
        }

out:
        if (map) {
                for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
                        free_page(map->m_page_addrs[i]);
                kfree(map);
        }

        rdsdebug("map %p for addr %pI6c\n", ret, addr);

        return ret;
}

/*
 * Put the conn on its local map's list.  This is called when the conn is
 * really added to the hash.  It's nested under the rds_conn_lock, sadly.
 */
void rds_cong_add_conn(struct rds_connection *conn)
{
        unsigned long flags;

        rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
        spin_lock_irqsave(&rds_cong_lock, flags);
        list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

void rds_cong_remove_conn(struct rds_connection *conn)
{
        unsigned long flags;

        rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
        spin_lock_irqsave(&rds_cong_lock, flags);
        list_del_init(&conn->c_map_item);
        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

int rds_cong_get_maps(struct rds_connection *conn)
{
        conn->c_lcong = rds_cong_from_addr(&conn->c_laddr);
        conn->c_fcong = rds_cong_from_addr(&conn->c_faddr);

        if (!(conn->c_lcong && conn->c_fcong))
                return -ENOMEM;

        return 0;
}

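/*
 * Ask every connection whose local address owns @map to (re)send the
 * map.  The actual transmit is deferred to rds_send_worker(); the
 * comment in the body explains why it cannot happen inline.
 */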
void rds_cong_queue_updates(struct rds_cong_map *map)
{
        struct rds_connection *conn;
        unsigned long flags;

        spin_lock_irqsave(&rds_cong_lock, flags);

        list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
                struct rds_conn_path *cp = &conn->c_path[0];

                rcu_read_lock();
                if (!test_and_set_bit(0, &conn->c_map_queued) &&
                    !rds_destroy_pending(cp->cp_conn)) {
                        rds_stats_inc(s_cong_update_queued);
                        /* We cannot inline the call to rds_send_xmit() here
                         * for two reasons (both pertaining to a TCP transport):
                         * 1. When we get here from the receive path, we
                         *    are already holding the sock_lock (held by
                         *    tcp_v4_rcv()).  So inlining calls to
                         *    tcp_setsockopt and/or tcp_sendmsg would deadlock
                         *    when they try to take the sock_lock().
                         * 2. Interrupts are masked so that we can mark the
                         *    port congested from both send and recv paths.
                         *    (See comment around declaration of rds_cong_lock).
                         *    An attempt to take the sock_lock() here will
                         *    therefore trigger warnings.
                         * Defer the xmit to rds_send_worker() instead.
                         */
                        queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
                }
                rcu_read_unlock();
        }

        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

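/*
 * Process an incoming congestion update: bump the global generation
 * counter, wake anyone sleeping on the map or in poll, and notify
 * monitor sockets whose masked ports appear in @portmask.
 */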
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
        rdsdebug("waking map %p for %pI6c\n",
          map, &map->m_addr);
        rds_stats_inc(s_cong_update_received);
        atomic_inc(&rds_cong_generation);
        if (waitqueue_active(&map->m_waitq))
                wake_up(&map->m_waitq);
        if (waitqueue_active(&rds_poll_waitq))
                wake_up_all(&rds_poll_waitq);

        if (portmask && !list_empty(&rds_cong_monitor)) {
                unsigned long flags;
                struct rds_sock *rs;

                read_lock_irqsave(&rds_cong_monitor_lock, flags);
                list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
                        spin_lock(&rs->rs_lock);
                        rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
                        rs->rs_cong_mask &= ~portmask;
                        spin_unlock(&rs->rs_lock);
                        if (rs->rs_cong_notify)
                                rds_wake_sk_sleep(rs);
                }
                read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
        }
}
EXPORT_SYMBOL_GPL(rds_cong_map_updated);

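/*
 * Return 1 (updating *recent) if the global congestion generation has
 * advanced past the caller's cached value, else 0.
 */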
int rds_cong_updated_since(unsigned long *recent)
{
        unsigned long gen = atomic_read(&rds_cong_generation);

        if (likely(*recent == gen))
                return 0;
        *recent = gen;
        return 1;
}

/*
 * We're called under the locking that protects the socket's receive buffer
 * consumption.  This makes it a lot easier for the caller to only call us
 * when it knows that an existing set bit needs to be cleared, and vice versa.
 * We can't block and we need to deal with concurrent sockets working against
 * the same per-address map.
 */
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        rdsdebug("setting congestion for %pI6c:%u in map %p\n",
          &map->m_addr, ntohs(port), map);

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        set_bit_le(off, (void *)map->m_page_addrs[i]);
}

void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        rdsdebug("clearing congestion for %pI6c:%u in map %p\n",
          &map->m_addr, ntohs(port), map);

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        clear_bit_le(off, (void *)map->m_page_addrs[i]);
}

static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        return test_bit_le(off, (void *)map->m_page_addrs[i]);
}

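/*
 * Sockets opt in to congestion monitoring with the RDS_CONG_MONITOR
 * socket option; these two helpers maintain the global monitor list.
 */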
void rds_cong_add_socket(struct rds_sock *rs)
{
        unsigned long flags;

        write_lock_irqsave(&rds_cong_monitor_lock, flags);
        if (list_empty(&rs->rs_cong_list))
                list_add(&rs->rs_cong_list, &rds_cong_monitor);
        write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}

void rds_cong_remove_socket(struct rds_sock *rs)
{
        unsigned long flags;
        struct rds_cong_map *map;

        write_lock_irqsave(&rds_cong_monitor_lock, flags);
        list_del_init(&rs->rs_cong_list);
        write_unlock_irqrestore(&rds_cong_monitor_lock, flags);

        /* update congestion map for now-closed port */
        spin_lock_irqsave(&rds_cong_lock, flags);
        map = rds_cong_tree_walk(&rs->rs_bound_addr, NULL);
        spin_unlock_irqrestore(&rds_cong_lock, flags);

        if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
                rds_cong_clear_bit(map, rs->rs_bound_port);
                rds_cong_queue_updates(map);
        }
}

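/*
 * Block until @port in the destination @map is no longer congested, or
 * fail fast for nonblocking sends.  A minimal sketch of the caller side
 * (rds_sendmsg() in send.c; illustrative, not verbatim):
 *
 *      ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
 *      if (ret)
 *              goto out;
 */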
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
                  struct rds_sock *rs)
{
        if (!rds_cong_test_bit(map, port))
                return 0;
        if (nonblock) {
                if (rs && rs->rs_cong_monitor) {
                        unsigned long flags;

                        /* It would have been nice to have an atomic set_bit on
                         * a uint64_t. */
                        spin_lock_irqsave(&rs->rs_lock, flags);
                        rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
                        spin_unlock_irqrestore(&rs->rs_lock, flags);

                        /* Test again - a congestion update may have arrived in
                         * the meantime. */
                        if (!rds_cong_test_bit(map, port))
                                return 0;
                }
                rds_stats_inc(s_cong_send_error);
                return -ENOBUFS;
        }

        rds_stats_inc(s_cong_send_blocked);
        rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));

        return wait_event_interruptible(map->m_waitq,
                                        !rds_cong_test_bit(map, port));
}

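/*
 * Tear down all maps at module unload.  By this point every connection
 * (and with it every m_conn_list entry) has been freed, so the tree can
 * be walked without locking.
 */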
void rds_cong_exit(void)
{
        struct rb_node *node;
        struct rds_cong_map *map;
        unsigned long i;

        while ((node = rb_first(&rds_cong_tree))) {
                map = rb_entry(node, struct rds_cong_map, m_rb_node);
                rdsdebug("freeing map %p\n", map);
                rb_erase(&map->m_rb_node, &rds_cong_tree);
                for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
                        free_page(map->m_page_addrs[i]);
                kfree(map);
        }
}

/*
 * Allocate an RDS message containing a congestion update.
 */
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
        struct rds_cong_map *map = conn->c_lcong;
        struct rds_message *rm;

        rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
        if (!IS_ERR(rm))
                rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;

        return rm;
}
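
/*
 * A sketch of the send-side consumer (rds_send_xmit() in send.c; names
 * recalled from the surrounding code, so treat as illustrative): when it
 * finds conn->c_map_queued set, it calls rds_cong_update_alloc() and
 * transmits the resulting bitmap message ahead of any queued data.
 */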
