/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_EQ_STATE_ARMED		= 0x9,
	MLX5_EQ_STATE_FIRED		= 0xa,
	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
};

enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x100,
	MLX5_NUM_CMD_EQE	= 32,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

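/* Default set of asynchronous events subscribed to on the async EQ */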
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

struct map_eq_in {
	u64	mask;
	u32	reserved;
	u32	unmap_eqn;
};

struct cre_des_eq {
	u8	reserved[15];
	u8	eqn;
};

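/* Issue a DESTROY_EQ firmware command for the given EQ number */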
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	struct mlx5_destroy_eq_mbox_in in;
	struct mlx5_destroy_eq_mbox_out out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
	in.eqn = eqn;
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		goto ex;

	if (out.hdr.status)
		err = mlx5_cmd_status_to_err(&out.hdr);

ex:
	return err;
}

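/* Return a pointer to the EQE at index @entry within the EQ buffer */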
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

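/* Return the EQE at the consumer index if it is owned by software, or
 * NULL otherwise.  Ownership alternates on each pass over the queue, so
 * the owner bit is compared against the pass parity of the consumer index.
 */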
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_PAGE_FAULT:
		return "MLX5_EVENT_TYPE_PAGE_FAULT";
	default:
		return "Unrecognized event";
	}
}

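/* Translate a PORT_CHANGE EQE subtype to the corresponding driver event */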
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}

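/* Write the consumer index to the EQ doorbell, optionally re-arming the EQ */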
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32) cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

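/* Poll the EQ and dispatch every software-owned EQE to its handler.  The
 * consumer index is updated along the way and the EQ is re-armed once all
 * pending entries have been consumed.
 */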
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int eqes_found = 0;
	int set_ci = 0;
	u32 cqn;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_cq_completion(dev, cqn);
			break;

		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;
		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_cq_event(dev, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		case MLX5_EVENT_TYPE_PAGE_FAULT:
			mlx5_eq_pagefault(dev, eqe);
			break;
#endif

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	return eqes_found;
}

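/* MSI-X interrupt handler; each EQ is bound to its own vector */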
static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;

	mlx5_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

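/* Set the owner bit of every EQE so the queue starts out empty from the
 * software point of view.
 */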
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

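/* Allocate an EQ buffer, create the EQ in firmware, hook it up to an MSI-X
 * vector and arm it.  @nent is padded with MLX5_NUM_SPARE_EQE entries and
 * rounded up to a power of two.
 */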
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_create_eq_mbox_in *in;
	struct mlx5_create_eq_mbox_out out;
	int err;
	int inlen;

	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
			     &eq->buf);
	if (err)
		return err;

	init_eq_buf(eq);

	inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}
	memset(&out, 0, sizeof(out));

	mlx5_fill_page_array(&eq->buf, in->pas);

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
	in->ctx.intr = vecidx;
	in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->events_mask = cpu_to_be64(mask);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		goto err_in;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		goto err_in;
	}

	snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));
	eq->eqn = out.eq_number;
	eq->irqn = vecidx;
	eq->dev = dev;
	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
			  eq->name, eq);
	if (err)
		goto err_eq;

	err = mlx5_debug_eq_add(dev, eq);
	if (err)
		goto err_irq;

	/* EQs are created in ARMED state */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(table->msix_arr[vecidx].vector, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

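/* Tear down an EQ: release its interrupt, destroy it in firmware and free
 * the EQ buffer.
 */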
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	mlx5_debug_eq_remove(dev, eq);
	free_irq(table->msix_arr[eq->irqn].vector, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	synchronize_irq(table->msix_arr[eq->irqn].vector);
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
	int err;

	spin_lock_init(&dev->priv.eq_table.lock);

	err = mlx5_eq_debugfs_init(dev);

	return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_eq_debugfs_cleanup(dev);
}

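/* Create the command, async and pages EQs and switch the command interface
 * from polling to event-driven completions.
 */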
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 dev->caps.gen.max_vf + 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

	return err;

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}

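/* Destroy the EQs created by mlx5_start_eqs(); the command interface is
 * switched back to polling before the command EQ is destroyed.
 */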
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_cmd_use_events(dev);

	return err;
}

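/* Query EQ attributes from firmware via the QUERY_EQ command */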
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       struct mlx5_query_eq_mbox_out *out, int outlen)
{
	struct mlx5_query_eq_mbox_in in;
	int err;

	memset(&in, 0, sizeof(in));
	memset(out, 0, outlen);
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
	in.eqn = eq->eqn;
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err)
		return err;

	if (out->hdr.status)
		err = mlx5_cmd_status_to_err(&out->hdr);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);