/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  address vector functions
 *
 *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Khadija Souissi <souissik@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>

#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"

static struct kmem_cache *av_cache;

int ehca_calc_ipd(struct ehca_shca *shca, int port,
		  enum ib_rate path_rate, u32 *ipd)
{
	int path = ib_rate_to_mult(path_rate);
	int link, ret;
	struct ib_port_attr pa;

	if (path_rate == IB_RATE_PORT_CURRENT) {
		*ipd = 0;
		return 0;
	}

	if (unlikely(path < 0)) {
		ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
			 path_rate);
		return -EINVAL;
	}

	ret = ehca_query_port(&shca->ib_device, port, &pa);
	if (unlikely(ret < 0)) {
		ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret);
		return ret;
	}

	link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;

	if (path >= link)
		/* no need to throttle if path faster than link */
		*ipd = 0;
	else
		/* IPD = round((link / path) - 1) */
		*ipd = ((link + (path >> 1)) / path) - 1;

	return 0;
}

struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	int ret;
	struct ehca_av *av;
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);

	av = kmem_cache_alloc(av_cache, GFP_KERNEL);
	if (!av) {
		ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
			 pd, ah_attr);
		return ERR_PTR(-ENOMEM);
	}

	av->av.sl = ah_attr->sl;
	av->av.dlid = ah_attr->dlid;
	av->av.slid_path_bits = ah_attr->src_path_bits;

	if (ehca_static_rate < 0) {
		u32 ipd;
		if (ehca_calc_ipd(shca, ah_attr->port_num,
				  ah_attr->static_rate, &ipd)) {
			ret = -EINVAL;
			goto create_ah_exit1;
		}
		av->av.ipd = ipd;
	} else
		av->av.ipd = ehca_static_rate;

	av->av.lnh = ah_attr->ah_flags;
	av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
					    ah_attr->grh.traffic_class);
	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
					    ah_attr->grh.flow_label);
	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
					    ah_attr->grh.hop_limit);
	av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
	/* set sgid in grh.word_1 */
	if (ah_attr->ah_flags & IB_AH_GRH) {
		int rc;
		struct ib_port_attr port_attr;
		union ib_gid gid;
		memset(&port_attr, 0, sizeof(port_attr));
		rc = ehca_query_port(pd->device, ah_attr->port_num,
				     &port_attr);
		if (rc) { /* invalid port number */
			ret = -EINVAL;
			ehca_err(pd->device, "Invalid port number "
				 "ehca_query_port() returned %x "
				 "pd=%p ah_attr=%p", rc, pd, ah_attr);
			goto create_ah_exit1;
		}
		memset(&gid, 0, sizeof(gid));
		rc = ehca_query_gid(pd->device,
				    ah_attr->port_num,
				    ah_attr->grh.sgid_index, &gid);
		if (rc) {
			ret = -EINVAL;
			ehca_err(pd->device, "Failed to retrieve sgid "
				 "ehca_query_gid() returned %x "
				 "pd=%p ah_attr=%p", rc, pd, ah_attr);
			goto create_ah_exit1;
		}
		memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
	}
	av->av.pmtu = shca->max_mtu;

	/* dgid comes in grh.word_3 */
	memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
	       sizeof(ah_attr->grh.dgid));

	return &av->ib_ah;

create_ah_exit1:
	kmem_cache_free(av_cache, av);

	return ERR_PTR(ret);
}

int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	struct ehca_av *av;
	struct ehca_ud_av new_ehca_av;
	struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
					      ib_device);

	memset(&new_ehca_av, 0, sizeof(new_ehca_av));
	new_ehca_av.sl = ah_attr->sl;
	new_ehca_av.dlid = ah_attr->dlid;
	new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
	new_ehca_av.ipd = ah_attr->static_rate;
	new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
					 (ah_attr->ah_flags & IB_AH_GRH) > 0);
	new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
						ah_attr->grh.traffic_class);
	new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
						 ah_attr->grh.flow_label);
	new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
						 ah_attr->grh.hop_limit);
	new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);

	/* set sgid in grh.word_1 */
	if (ah_attr->ah_flags & IB_AH_GRH) {
		int rc;
		struct ib_port_attr port_attr;
		union ib_gid gid;
		memset(&port_attr, 0, sizeof(port_attr));
		rc = ehca_query_port(ah->device, ah_attr->port_num,
				     &port_attr);
		if (rc) { /* invalid port number */
			ehca_err(ah->device, "Invalid port number "
				 "ehca_query_port() returned %x "
				 "ah=%p ah_attr=%p port_num=%x",
				 rc, ah, ah_attr, ah_attr->port_num);
			return -EINVAL;
		}
		memset(&gid, 0, sizeof(gid));
		rc = ehca_query_gid(ah->device,
				    ah_attr->port_num,
				    ah_attr->grh.sgid_index, &gid);
		if (rc) {
			ehca_err(ah->device, "Failed to retrieve sgid "
				 "ehca_query_gid() returned %x "
				 "ah=%p ah_attr=%p port_num=%x "
				 "sgid_index=%x",
				 rc, ah, ah_attr, ah_attr->port_num,
				 ah_attr->grh.sgid_index);
			return -EINVAL;
		}
		memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
	}

	new_ehca_av.pmtu = shca->max_mtu;

	memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
	       sizeof(ah_attr->grh.dgid));

	av = container_of(ah, struct ehca_av, ib_ah);
	av->av = new_ehca_av;

	return 0;
}

int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);

	memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
	       sizeof(ah_attr->grh.dgid));
	ah_attr->sl = av->av.sl;

	ah_attr->dlid = av->av.dlid;

	ah_attr->src_path_bits = av->av.slid_path_bits;
	ah_attr->static_rate = av->av.ipd;
	ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
	ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
						    av->av.grh.word_0);
	ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
						av->av.grh.word_0);
	ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
						 av->av.grh.word_0);

	return 0;
}

int ehca_destroy_ah(struct ib_ah *ah)
{
	kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));

	return 0;
}

int ehca_init_av_cache(void)
{
	av_cache = kmem_cache_create("ehca_cache_av",
				     sizeof(struct ehca_av), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!av_cache)
		return -ENOMEM;
	return 0;
}

void ehca_cleanup_av_cache(void)
{
	if (av_cache)
		kmem_cache_destroy(av_cache);
}