1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 *   This program is free software; you can redistribute it and/or
5 *   modify it under the terms of the GNU General Public License
6 *   as published by the Free Software Foundation, version 2.
7 *
8 *   This program is distributed in the hope that it will be useful, but
9 *   WITHOUT ANY WARRANTY; without even the implied warranty of
10 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 *   NON INFRINGEMENT.  See the GNU General Public License for
12 *   more details.
13 */
14
15/*
16 * Implementation of mpipe gxio calls.
17 */
18
19#include <linux/errno.h>
20#include <linux/io.h>
21#include <linux/module.h>
22#include <linux/string.h>
23
24#include <gxio/iorpc_globals.h>
25#include <gxio/iorpc_mpipe.h>
26#include <gxio/iorpc_mpipe_info.h>
27#include <gxio/kiorpc.h>
28#include <gxio/mpipe.h>
29
30/* HACK: Avoid pointless "shadow" warnings. */
31#define link link_shadow
32
33int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
34{
35	char file[32];
36
37	int fd;
38	int i;
39
40	if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
41		return -EINVAL;
42
43	snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
44	fd = hv_dev_open((HV_VirtAddr) file, 0);
45
46	context->fd = fd;
47
48	if (fd < 0) {
49		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
50			return fd;
51		else
52			return -ENODEV;
53	}
54
55	/* Map in the MMIO space. */
56	context->mmio_cfg_base = (void __force *)
57		iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
58			      HV_MPIPE_CONFIG_MMIO_SIZE);
59	if (context->mmio_cfg_base == NULL)
60		goto cfg_failed;
61
62	context->mmio_fast_base = (void __force *)
63		iorpc_ioremap(fd, HV_MPIPE_FAST_MMIO_OFFSET,
64			      HV_MPIPE_FAST_MMIO_SIZE);
65	if (context->mmio_fast_base == NULL)
66		goto fast_failed;
67
68	/* Initialize the stacks. */
69	for (i = 0; i < 8; i++)
70		context->__stacks.stacks[i] = 255;
71
72	context->instance = mpipe_index;
73
74	return 0;
75
76      fast_failed:
77	iounmap((void __force __iomem *)(context->mmio_cfg_base));
78      cfg_failed:
79	hv_dev_close(context->fd);
80	context->fd = -1;
81	return -ENODEV;
82}
83
84EXPORT_SYMBOL_GPL(gxio_mpipe_init);
85
86int gxio_mpipe_destroy(gxio_mpipe_context_t *context)
87{
88	iounmap((void __force __iomem *)(context->mmio_cfg_base));
89	iounmap((void __force __iomem *)(context->mmio_fast_base));
90	return hv_dev_close(context->fd);
91}
92
93EXPORT_SYMBOL_GPL(gxio_mpipe_destroy);
94
/*
 * Buffer size in bytes for each gxio_mpipe_buffer_size_enum_t value
 * (index 0..7); used by the enum <-> size conversion helpers below.
 */
static int16_t gxio_mpipe_buffer_sizes[8] =
	{ 128, 256, 512, 1024, 1664, 4096, 10368, 16384 };
97
98gxio_mpipe_buffer_size_enum_t gxio_mpipe_buffer_size_to_buffer_size_enum(size_t
99									 size)
100{
101	int i;
102	for (i = 0; i < 7; i++)
103		if (size <= gxio_mpipe_buffer_sizes[i])
104			break;
105	return i;
106}
107
108EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum);
109
110size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
111						  buffer_size_enum)
112{
113	if (buffer_size_enum > 7)
114		buffer_size_enum = 7;
115
116	return gxio_mpipe_buffer_sizes[buffer_size_enum];
117}
118
119EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size);
120
/*
 * Compute the number of bytes of memory needed for a buffer stack
 * holding "buffers" buffers: 12 buffer slots fit per L2 cache line, and
 * the stack occupies a whole number of cache lines.
 */
size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers)
{
	enum { BUFFERS_PER_LINE = 12 };
	unsigned long lines;

	/* Round up to a whole number of cache lines. */
	lines = (buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE;

	return lines * CHIP_L2_LINE_SIZE();
}

EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes);
134
135int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
136				 unsigned int stack,
137				 gxio_mpipe_buffer_size_enum_t
138				 buffer_size_enum, void *mem, size_t mem_size,
139				 unsigned int mem_flags)
140{
141	int result;
142
143	memset(mem, 0, mem_size);
144
145	result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size,
146						  mem_flags, stack,
147						  buffer_size_enum);
148	if (result < 0)
149		return result;
150
151	/* Save the stack. */
152	context->__stacks.stacks[buffer_size_enum] = stack;
153
154	return 0;
155}
156
157EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack);
158
159int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
160			       unsigned int ring,
161			       void *mem, size_t mem_size,
162			       unsigned int mem_flags)
163{
164	return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size,
165					      mem_flags, ring);
166}
167
168EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring);
169
170int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context,
171					    unsigned int group,
172					    unsigned int ring,
173					    unsigned int num_rings,
174					    unsigned int bucket,
175					    unsigned int num_buckets,
176					    gxio_mpipe_bucket_mode_t mode)
177{
178	int i;
179	int result;
180
181	gxio_mpipe_bucket_info_t bucket_info = { {
182						  .group = group,
183						  .mode = mode,
184						  }
185	};
186
187	gxio_mpipe_notif_group_bits_t bits = { {0} };
188
189	for (i = 0; i < num_rings; i++)
190		gxio_mpipe_notif_group_add_ring(&bits, ring + i);
191
192	result = gxio_mpipe_init_notif_group(context, group, bits);
193	if (result != 0)
194		return result;
195
196	for (i = 0; i < num_buckets; i++) {
197		bucket_info.notifring = ring + (i % num_rings);
198
199		result = gxio_mpipe_init_bucket(context, bucket + i,
200						bucket_info);
201		if (result != 0)
202			return result;
203	}
204
205	return 0;
206}
207
208EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets);
209
210int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
211			      unsigned int ring, unsigned int channel,
212			      void *mem, size_t mem_size,
213			      unsigned int mem_flags)
214{
215	memset(mem, 0, mem_size);
216
217	return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags,
218					     ring, channel);
219}
220
221EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring);
222
223void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
224			   gxio_mpipe_context_t *context)
225{
226	rules->context = context;
227	memset(&rules->list, 0, sizeof(rules->list));
228}
229
230EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init);
231
/*
 * Begin a new classifier rule in "rules", directing matched packets to
 * the "num_buckets" buckets starting at "bucket" (num_buckets must be a
 * power of two, since it is stored as a mask).  "stacks", if non-NULL,
 * overrides the per-size buffer stacks cached in the context by
 * gxio_mpipe_init_buffer_stack().
 *
 * Returns 0, GXIO_MPIPE_ERR_RULES_FULL when the rule array has no room,
 * or GXIO_MPIPE_ERR_RULES_INVALID for a bad num_buckets or when no
 * buffer stack at all is configured.
 */
int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
			   unsigned int bucket, unsigned int num_buckets,
			   gxio_mpipe_rules_stacks_t *stacks)
{
	int i;
	int stack = 255;	/* 255 means "no stack chosen yet". */

	gxio_mpipe_rules_list_t *list = &rules->list;

	/* Current rule (i.e. the most recently begun one, if any). */
	gxio_mpipe_rules_rule_t *rule =
		(gxio_mpipe_rules_rule_t *) (list->rules + list->head);

	/* The new rule starts where the previous rule's bytes end. */
	unsigned int head = list->tail;

	/*
	 * Align next rule properly.
	 * Note that "dmacs_and_vlans" will also be aligned.
	 */
	unsigned int pad = 0;
	while (((head + pad) % __alignof__(gxio_mpipe_rules_rule_t)) != 0)
		pad++;

	/*
	 * Verify room.
	 * ISSUE: Mark rules as broken on error?
	 */
	if (head + pad + sizeof(*rule) >= sizeof(list->rules))
		return GXIO_MPIPE_ERR_RULES_FULL;

	/* Verify num_buckets is a power of 2. */
	if (__builtin_popcount(num_buckets) != 1)
		return GXIO_MPIPE_ERR_RULES_INVALID;

	/*
	 * Add padding to previous rule.  (On an empty list this bumps the
	 * zeroed size of rules[0], which is harmless since
	 * gxio_mpipe_rules_init() cleared the whole list.)
	 */
	rule->size += pad;

	/* Start a new rule. */
	list->head = head + pad;

	rule = (gxio_mpipe_rules_rule_t *) (list->rules + list->head);

	/* Default some values. */
	rule->headroom = 2;
	rule->tailroom = 0;
	rule->capacity = 16384;

	/* Save the bucket info. */
	rule->bucket_mask = num_buckets - 1;
	rule->bucket_first = bucket;

	/*
	 * Fill the per-size stack table from largest size to smallest,
	 * carrying each configured stack down to smaller sizes that have
	 * none of their own (255 == unconfigured).
	 */
	for (i = 8 - 1; i >= 0; i--) {
		int maybe =
			stacks ? stacks->stacks[i] : rules->context->__stacks.
			stacks[i];
		if (maybe != 255)
			stack = maybe;
		rule->stacks.stacks[i] = stack;
	}

	/* No stack was configured for any size at all. */
	if (stack == 255)
		return GXIO_MPIPE_ERR_RULES_INVALID;

	/* NOTE: Only entries at the end of the array can be 255. */
	for (i = 8 - 1; i > 0; i--) {
		if (rule->stacks.stacks[i] == 255) {
			rule->stacks.stacks[i] = stack;
			rule->capacity =
				gxio_mpipe_buffer_size_enum_to_buffer_size(i -
									   1);
		}
	}

	rule->size = sizeof(*rule);
	list->tail = list->head + rule->size;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin);
312
313int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
314				 unsigned int channel)
315{
316	gxio_mpipe_rules_list_t *list = &rules->list;
317
318	gxio_mpipe_rules_rule_t *rule =
319		(gxio_mpipe_rules_rule_t *) (list->rules + list->head);
320
321	/* Verify channel. */
322	if (channel >= 32)
323		return GXIO_MPIPE_ERR_RULES_INVALID;
324
325	/* Verify begun. */
326	if (list->tail == 0)
327		return GXIO_MPIPE_ERR_RULES_EMPTY;
328
329	rule->channel_bits |= (1UL << channel);
330
331	return 0;
332}
333
334EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel);
335
336int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, uint8_t headroom)
337{
338	gxio_mpipe_rules_list_t *list = &rules->list;
339
340	gxio_mpipe_rules_rule_t *rule =
341		(gxio_mpipe_rules_rule_t *) (list->rules + list->head);
342
343	/* Verify begun. */
344	if (list->tail == 0)
345		return GXIO_MPIPE_ERR_RULES_EMPTY;
346
347	rule->headroom = headroom;
348
349	return 0;
350}
351
352EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom);
353
354int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules)
355{
356	gxio_mpipe_rules_list_t *list = &rules->list;
357	unsigned int size =
358		offsetof(gxio_mpipe_rules_list_t, rules) + list->tail;
359	return gxio_mpipe_commit_rules(rules->context, list, size);
360}
361
362EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit);
363
364int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
365			   gxio_mpipe_context_t *context,
366			   unsigned int ring,
367			   void *mem, size_t mem_size, unsigned int mem_flags)
368{
369	/* The init call below will verify that "mem_size" is legal. */
370	unsigned int num_entries = mem_size / sizeof(gxio_mpipe_idesc_t);
371
372	iqueue->context = context;
373	iqueue->idescs = (gxio_mpipe_idesc_t *)mem;
374	iqueue->ring = ring;
375	iqueue->num_entries = num_entries;
376	iqueue->mask_num_entries = num_entries - 1;
377	iqueue->log2_num_entries = __builtin_ctz(num_entries);
378	iqueue->head = 1;
379#ifdef __BIG_ENDIAN__
380	iqueue->swapped = 0;
381#endif
382
383	/* Initialize the "tail". */
384	__gxio_mmio_write(mem, iqueue->head);
385
386	return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size,
387					  mem_flags);
388}
389
390EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
391
/*
 * Initialize an egress queue: configure the eDMA ring in hardware, then
 * set up the software queue state, including the MMIO location (via the
 * fast MMIO mapping) used by the DMA queue machinery.
 *
 * Returns 0, or a negative error from the eDMA ring init.
 */
int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
			   gxio_mpipe_context_t *context,
			   unsigned int ering,
			   unsigned int channel,
			   void *mem, unsigned int mem_size,
			   unsigned int mem_flags)
{
	/* The init call below will verify that "mem_size" is legal. */
	unsigned int num_entries = mem_size / sizeof(gxio_mpipe_edesc_t);

	/* Offset used to read number of completed commands. */
	MPIPE_EDMA_POST_REGION_ADDR_t offset;

	int result = gxio_mpipe_init_edma_ring(context, ering, channel,
					       mem, mem_size, mem_flags);
	if (result < 0)
		return result;

	memset(equeue, 0, sizeof(*equeue));

	/*
	 * Build the post-region address for this ring.  NOTE(review): the
	 * EDMA - IDMA subtraction suggests mmio_fast_base points at the
	 * IDMA region, making this an offset relative to it -- confirm
	 * against the mPIPE MMIO layout.
	 */
	offset.word = 0;
	offset.region =
		MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
		MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
	offset.ring = ering;

	__gxio_dma_queue_init(&equeue->dma_queue,
			      context->mmio_fast_base + offset.word,
			      num_entries);

	/* Cache everything the fast-path egress code needs. */
	equeue->edescs = mem;
	equeue->mask_num_entries = num_entries - 1;
	equeue->log2_num_entries = __builtin_ctz(num_entries);
	equeue->context = context;
	equeue->ering = ering;
	equeue->channel = channel;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
432
433int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
434			     const struct timespec64 *ts)
435{
436	cycles_t cycles = get_cycles();
437	return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
438					    (uint64_t)ts->tv_nsec,
439					    (uint64_t)cycles);
440}
441EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp);
442
/*
 * Read the current mPIPE timestamp into "ts", compensating for the time
 * elapsed between the hypervisor's snapshot and this call by converting
 * the cycle delta to nanoseconds using the clock rate.
 *
 * Returns 0, or a negative error from the aux IORPC call.
 */
int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
			     struct timespec64 *ts)
{
	int ret;
	cycles_t cycles_prev, cycles_now, clock_rate;

	/* Cycle counter before the call, for latency compensation below. */
	cycles_prev = get_cycles();

	/*
	 * NOTE(review): tv_sec/tv_nsec are written through uint64_t casts;
	 * this assumes both fields are 64-bit on this ABI -- confirm.
	 */
	ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec,
					   (uint64_t *)&ts->tv_nsec,
					   (uint64_t *)&cycles_now);
	if (ret < 0) {
		return ret;
	}

	/* Subtract the cycles spent in the call, converted to nanoseconds. */
	clock_rate = get_clock_rate();
	ts->tv_nsec -= (cycles_now - cycles_prev) * 1000000000LL / clock_rate;
	if (ts->tv_nsec < 0) {
		/* Borrow a second if the adjustment went negative. */
		ts->tv_nsec += 1000000000LL;
		ts->tv_sec -= 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(gxio_mpipe_get_timestamp);
465
466int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
467{
468	return gxio_mpipe_adjust_timestamp_aux(context, delta);
469}
470EXPORT_SYMBOL_GPL(gxio_mpipe_adjust_timestamp);
471
/* Get our internal context used for link name access.  This context is
 * special in that it is not associated with an mPIPE service domain.
 * Returns NULL if no mPIPE shim's info device could be opened.
 */
static gxio_mpipe_context_t *_gxio_get_link_context(void)
{
	static gxio_mpipe_context_t context;
	static gxio_mpipe_context_t *contextp;
	static int tried_open = 0;	/* Ensures we probe the shims only once. */
	static DEFINE_MUTEX(mutex);

	/* Serialize the one-time probe against concurrent callers. */
	mutex_lock(&mutex);

	if (!tried_open) {
		int i = 0;
		tried_open = 1;

		/*
		 * "4" here is the maximum possible number of mPIPE shims; it's
		 * an exaggeration but we shouldn't ever go beyond 2 anyway.
		 */
		for (i = 0; i < 4; i++) {
			char file[80];

			snprintf(file, sizeof(file), "mpipe/%d/iorpc_info", i);
			context.fd = hv_dev_open((HV_VirtAddr) file, 0);
			if (context.fd < 0)
				continue;

			/* Use the first shim that opens successfully. */
			contextp = &context;
			break;
		}
	}

	mutex_unlock(&mutex);

	/* Still NULL if every open failed. */
	return contextp;
}
509
510int gxio_mpipe_link_instance(const char *link_name)
511{
512	_gxio_mpipe_link_name_t name;
513	gxio_mpipe_context_t *context = _gxio_get_link_context();
514
515	if (!context)
516		return GXIO_ERR_NO_DEVICE;
517
518	if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
519		return GXIO_ERR_NO_DEVICE;
520
521	return gxio_mpipe_info_instance_aux(context, name);
522}
523EXPORT_SYMBOL_GPL(gxio_mpipe_link_instance);
524
/*
 * Retrieve the name and MAC address of the link with ordinal "idx" into
 * "link_name" and "link_mac".  Returns the aux call's result,
 * GXIO_ERR_NO_DEVICE if no link-info context is available, or
 * GXIO_ERR_INVAL_MEMORY_SIZE if the name does not fit.
 *
 * NOTE(review): the strscpy below bounds the copy by sizeof(name.name),
 * not by the (unknown) size of the caller's link_name buffer -- callers
 * must supply at least sizeof(name.name) bytes; confirm at call sites.
 */
int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
{
	int rv;
	_gxio_mpipe_link_name_t name;
	_gxio_mpipe_link_mac_t mac;

	gxio_mpipe_context_t *context = _gxio_get_link_context();
	if (!context)
		return GXIO_ERR_NO_DEVICE;

	rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
	if (rv >= 0) {
		/* Copy out the results only on success. */
		if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
			return GXIO_ERR_INVAL_MEMORY_SIZE;
		memcpy(link_mac, mac.mac, sizeof(mac.mac));
	}

	return rv;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac);
546
547int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
548			 gxio_mpipe_context_t *context, const char *link_name,
549			 unsigned int flags)
550{
551	_gxio_mpipe_link_name_t name;
552	int rv;
553
554	if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
555		return GXIO_ERR_NO_DEVICE;
556
557	rv = gxio_mpipe_link_open_aux(context, name, flags);
558	if (rv < 0)
559		return rv;
560
561	link->context = context;
562	link->channel = rv >> 8;
563	link->mac = rv & 0xFF;
564
565	return 0;
566}
567
568EXPORT_SYMBOL_GPL(gxio_mpipe_link_open);
569
570int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
571{
572	return gxio_mpipe_link_close_aux(link->context, link->mac);
573}
574
575EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
576
577int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
578			     int64_t val)
579{
580	return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr,
581					    val);
582}
583
584EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr);
585