This source file includes the following definitions:
- ocrdma_hdr_type_to_proto_num
- set_av_attr
- ocrdma_create_ah
- ocrdma_destroy_ah
- ocrdma_query_ah
- ocrdma_process_mad
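/*
 * Address handle (AH) creation/query/teardown and MAD processing verbs
 * for the ocrdma RoCE driver.
 */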
#include <net/neighbour.h>
#include <net/netevent.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
#include "ocrdma_stats.h"

#define OCRDMA_VID_PCP_SHIFT	0xD

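/* Map the L3 header type programmed into the AH to the Ethertype carried
 * in the RoCE Ethernet header: IB GRH for RoCE v1, IPv4/IPv6 for RoCE v2.
 */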
static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
{
	switch (hdr_type) {
	case OCRDMA_L3_TYPE_IB_GRH:
		return (u16)ETH_P_IBOE;
	case OCRDMA_L3_TYPE_IPV4:
		return (u16)ETH_P_IP;
	case OCRDMA_L3_TYPE_IPV6:
		return (u16)ETH_P_IPV6;
	default:
		pr_err("ocrdma%d: Invalid network header\n", devid);
		return 0;
	}
}

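/* Build the address-vector contents for the hardware AH: an Ethernet
 * header (VLAN-tagged when needed) followed by either an IPv4 header
 * (RoCE v2 over IPv4) or a GRH.
 */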
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
			      struct rdma_ah_attr *attr, const union ib_gid *sgid,
			      int pdid, bool *isvlan, u16 vlan_tag)
{
	int status;
	struct ocrdma_eth_vlan eth;
	struct ocrdma_grh grh;
	int eth_sz;
	u16 proto_num = 0;
	u8 nxthdr = 0x11;
	struct iphdr ipv4;
	const struct ib_global_route *ib_grh;
	union {
		struct sockaddr_in  _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;

	memset(&eth, 0, sizeof(eth));
	memset(&grh, 0, sizeof(grh));

	/* Protocol Number */
	proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type);
	if (!proto_num)
		return -EINVAL;
	nxthdr = (proto_num == ETH_P_IBOE) ? 0x1b : 0x11;
	/* VLAN */
	if (!vlan_tag || (vlan_tag > 0xFFF))
		vlan_tag = dev->pvid;
	if (vlan_tag || dev->pfc_state) {
		if (!vlan_tag) {
			pr_err("ocrdma%d: Using VLAN with PFC is recommended\n",
			       dev->id);
			pr_err("ocrdma%d: Using VLAN 0 for this connection\n",
			       dev->id);
		}
		eth.eth_type = cpu_to_be16(0x8100);
		eth.roce_eth_type = cpu_to_be16(proto_num);
		vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
		eth.vlan_tag = cpu_to_be16(vlan_tag);
		eth_sz = sizeof(struct ocrdma_eth_vlan);
		*isvlan = true;
	} else {
		eth.eth_type = cpu_to_be16(proto_num);
		eth_sz = sizeof(struct ocrdma_eth_basic);
	}
	/* MAC */
	memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
	status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
	if (status)
		return status;
	ib_grh = rdma_ah_read_grh(attr);
	ah->sgid_index = ib_grh->sgid_index;

	memcpy(&ah->av->eth_hdr, &eth, eth_sz);
	if (ah->hdr_type == RDMA_NETWORK_IPV4) {
		*((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) |
					   ib_grh->traffic_class);
		ipv4.id = cpu_to_be16(pdid);
		ipv4.frag_off = htons(IP_DF);
		ipv4.tot_len = htons(0);
		ipv4.ttl = ib_grh->hop_limit;
		ipv4.protocol = nxthdr;
		rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
		ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
		rdma_gid2ip((struct sockaddr *)&dgid_addr, &ib_grh->dgid);
		ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
		memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
	} else {
		memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
		grh.tclass_flow = cpu_to_be32((6 << 28) |
					      (ib_grh->traffic_class << 24) |
					      ib_grh->flow_label);
		memcpy(&grh.dgid[0], ib_grh->dgid.raw,
		       sizeof(ib_grh->dgid.raw));
		grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
						(nxthdr << 8) |
						ib_grh->hop_limit);
		memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
	}
	if (*isvlan)
		ah->av->valid |= OCRDMA_AV_VLAN_VALID;
	ah->av->valid = cpu_to_le32(ah->av->valid);
	return status;
}

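/* ib_create_ah handler: allocate a hardware AV, program it from the RoCE
 * GID and VLAN attributes, and for user-space PDs publish the AH id (plus
 * L3 type and VLAN bits) through the shared ah_tbl page.
 */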
int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
		     struct ib_udata *udata)
{
	u32 *ahid_addr;
	int status;
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	bool isvlan = false;
	u16 vlan_tag = 0xffff;
	const struct ib_gid_attr *sgid_attr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibah->pd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);

	if ((attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
	    !(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))
		return -EINVAL;

	if (atomic_cmpxchg(&dev->update_sl, 1, 0))
		ocrdma_init_service_level(dev);

	sgid_attr = attr->grh.sgid_attr;
	status = rdma_read_gid_l2_fields(sgid_attr, &vlan_tag, NULL);
	if (status)
		return status;

	status = ocrdma_alloc_av(dev, ah);
	if (status)
		goto av_err;

	/* Get network header type for this GID */
	ah->hdr_type = rdma_gid_attr_network_type(sgid_attr);

	status = set_av_attr(dev, ah, attr, &sgid_attr->gid, pd->id,
			     &isvlan, vlan_tag);
	if (status)
		goto av_conf_err;

	/* if pd is for the user process, pass the ah_id to user space */
	if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
		ahid_addr = pd->uctx->ah_tbl.va + rdma_ah_get_dlid(attr);
		*ahid_addr = 0;
		*ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
		if (ocrdma_is_udp_encap_supported(dev)) {
			*ahid_addr |= ((u32)ah->hdr_type &
				       OCRDMA_AH_L3_TYPE_MASK) <<
				       OCRDMA_AH_L3_TYPE_SHIFT;
		}
		if (isvlan)
			*ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
				       OCRDMA_AH_VLAN_VALID_SHIFT);
	}

	return 0;

av_conf_err:
	ocrdma_free_av(dev, ah);
av_err:
	return status;
}

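/* ib_destroy_ah handler: return the hardware AV to the device pool. */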
void ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);

	ocrdma_free_av(dev, ah);
}

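/* ib_query_ah handler: rebuild the rdma_ah_attr (SL, flow label, hop
 * limit, traffic class, DGID) from the Ethernet/VLAN header and GRH
 * stored in the hardware AV.
 */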
int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
	struct ocrdma_av *av = ah->av;
	struct ocrdma_grh *grh;

	attr->type = ibah->type;
	if (ah->av->valid & OCRDMA_AV_VALID) {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
					    sizeof(struct ocrdma_eth_vlan));
		rdma_ah_set_sl(attr, be16_to_cpu(av->eth_hdr.vlan_tag) >> 13);
	} else {
		grh = (struct ocrdma_grh *)((u8 *)ah->av +
					    sizeof(struct ocrdma_eth_basic));
		rdma_ah_set_sl(attr, 0);
	}
	rdma_ah_set_grh(attr, NULL,
			be32_to_cpu(grh->tclass_flow) & 0xffffffff,
			ah->sgid_index,
			be32_to_cpu(grh->pdid_hoplimit) & 0xff,
			be32_to_cpu(grh->tclass_flow) >> 24);
	rdma_ah_set_dgid_raw(attr, &grh->dgid[0]);
	return 0;
}

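/* ib_process_mad handler: only performance-management MADs are handled,
 * by filling the reply from the device counters; all other classes are
 * reported as consumed without a reply.
 */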
int ocrdma_process_mad(struct ib_device *ibdev,
		       int process_mad_flags,
		       u8 port_num,
		       const struct ib_wc *in_wc,
		       const struct ib_grh *in_grh,
		       const struct ib_mad_hdr *in, size_t in_mad_size,
		       struct ib_mad_hdr *out, size_t *out_mad_size,
		       u16 *out_mad_pkey_index)
{
	int status;
	struct ocrdma_dev *dev;
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_PERF_MGMT:
		dev = get_ocrdma_dev(ibdev);
		if (!ocrdma_pma_counters(dev, out_mad))
			status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
		else
			status = IB_MAD_RESULT_SUCCESS;
		break;
	default:
		status = IB_MAD_RESULT_SUCCESS;
		break;
	}
	return status;
}