/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/io.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/string.h>
#include <gxio/dma_queue.h>

/*
 * Wait for a memory read to complete: consuming "val" as an input to
 * this no-op asm forces the load to be materialized before anything
 * that follows it.
 */
#define wait_for_value(val)                             \
  __asm__ __volatile__("move %0, %0" :: "r"(val))

/* The ring index is in the low 16 bits. */
#define DMA_QUEUE_INDEX_MASK ((1 << 16) - 1)
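
/*
 * Note: dma_queue->credits_and_next_index packs two fields into one
 * 64-bit word: a credit count in the bits at and above
 * DMA_QUEUE_CREDIT_SHIFT (defined in <gxio/dma_queue.h>), and a
 * free-running "next slot" index below it, of which only the low 16
 * bits (DMA_QUEUE_INDEX_MASK) are meaningful to the hardware.
 */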

/*
 * The hardware descriptor-ring type.
 * This matches the types used by mpipe (MPIPE_EDMA_POST_REGION_VAL_t)
 * and trio (TRIO_PUSH_DMA_REGION_VAL_t or TRIO_PULL_DMA_REGION_VAL_t).
 * See those types for more documentation on the individual fields.
 */
typedef union {
	struct {
#ifndef __BIG_ENDIAN__
		uint64_t ring_idx:16;
		uint64_t count:16;
		uint64_t gen:1;
		uint64_t __reserved:31;
#else
		uint64_t __reserved:31;
		uint64_t gen:1;
		uint64_t count:16;
		uint64_t ring_idx:16;
#endif
	};
	uint64_t word;
} __gxio_ring_t;
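
/*
 * For illustration, a sketch of decoding a snapshot of the post
 * region (this is what __gxio_dma_queue_update_credits() does below):
 *
 *	__gxio_ring_t val;
 *	val.word = __gxio_mmio_read(dma_queue->post_region_addr);
 *	// val.count is the hardware's 16-bit completion count;
 *	// val.ring_idx is its current 16-bit ring index.
 */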

void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
			   void *post_region_addr, unsigned int num_entries)
{
	/*
	 * Limit 65536-entry rings to 65535 credits, because we only have
	 * a 16-bit completion counter.
	 */
	int64_t credits = (num_entries < 65536) ? num_entries : 65535;

	memset(dma_queue, 0, sizeof(*dma_queue));

	dma_queue->post_region_addr = post_region_addr;
	dma_queue->hw_complete_count = 0;
	dma_queue->credits_and_next_index = credits << DMA_QUEUE_CREDIT_SHIFT;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_init);
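
/*
 * Typical use, as a sketch (the MMIO mapping of the post region comes
 * from the mpipe/trio setup code, not from this file):
 *
 *	__gxio_dma_queue_t q;
 *	__gxio_dma_queue_init(&q, post_region_addr, 512);
 *	// A 512-entry ring starts with 512 credits and index 0.
 */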

void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue)
{
	__gxio_ring_t val;
	uint64_t count;
	uint64_t delta;
	uint64_t new_count;

	/*
	 * Read the 64-bit completion count without touching the cache, so
	 * we later avoid having to evict any sharers of this cache line
	 * when we update it below.
	 */
	uint64_t orig_hw_complete_count =
		cmpxchg(&dma_queue->hw_complete_count,
			-1, -1);
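
	/*
	 * Note: cmpxchg() with old == new acts as an atomic read: it
	 * always returns the current value, and the only store it could
	 * perform (when the count happens to be -1) writes that same
	 * value back.
	 */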

	/* Make sure the load completes before we access the hardware. */
	wait_for_value(orig_hw_complete_count);

	/* Read the hardware's 16-bit count of completed descriptors. */
	val.word = __gxio_mmio_read(dma_queue->post_region_addr);
	count = val.count;

	/*
	 * Calculate the number of completions since we last updated the
	 * 64-bit counter.  It's safe to ignore the high bits because the
	 * maximum credit value is 65535.
	 */
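	/*
	 * Worked example: if the 64-bit count was 0x1fffe and the
	 * hardware's 16-bit count has advanced to 0x0003, then
	 * (0x0003 - 0x1fffe) & 0xffff == 5: five new completions,
	 * regardless of the 16-bit wraparound.
	 */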
	delta = (count - orig_hw_complete_count) & 0xffff;
	if (delta == 0)
		return;

	/*
	 * Try to write back the count, advanced by delta.  If we race with
	 * another thread, this might fail, in which case we return
	 * immediately on the assumption that some credits are (or at least
	 * were) available.
	 */
	new_count = orig_hw_complete_count + delta;
	if (cmpxchg(&dma_queue->hw_complete_count,
		    orig_hw_complete_count,
		    new_count) != orig_hw_complete_count)
		return;
	/*
	 * We succeeded in advancing the completion count; add back the
	 * corresponding number of egress credits.  Shifting the delta
	 * into the credit field leaves the low next-index bits untouched.
	 */
	__insn_fetchadd(&dma_queue->credits_and_next_index,
			(delta << DMA_QUEUE_CREDIT_SHIFT));
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_update_credits);

/*
 * A separate 'blocked' method for put() so that backtraces and
 * profiles will clearly indicate that we're wasting time spinning on
 * egress availability rather than actually posting commands.
 */
int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
					  int64_t modifier)
{
	int backoff = 16;
	int64_t old;

	do {
		int i;
		/* Back off to avoid spamming memory networks. */
		for (i = backoff; i > 0; i--)
			__insn_mfspr(SPR_PASS);

		/* Check credits again. */
		__gxio_dma_queue_update_credits(dma_queue);
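
		/*
		 * fetchaddgez atomically adds 'modifier', but commits the
		 * store only if the result is non-negative, i.e. only if
		 * enough credits remain; otherwise the word is unchanged
		 * and we keep spinning.
		 */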
		old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
					 modifier);

		/* Calculate bounded exponential backoff for next iteration. */
		if (backoff < 256)
			backoff *= 2;
	} while (old + modifier < 0);

	return old;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_wait_for_credits);

int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
				     unsigned int num, int wait)
{
	return __gxio_dma_queue_reserve(dma_queue, num, wait != 0, true);
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_reserve_aux);
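
/*
 * Example caller flow, as a sketch (__gxio_dma_queue_reserve() itself
 * is an inline in <gxio/dma_queue.h>):
 *
 *	int64_t slot = __gxio_dma_queue_reserve_aux(&q, 1, 1);
 *	if (slot >= 0) {
 *		// Fill in ring entry (slot & DMA_QUEUE_INDEX_MASK),
 *		// then post it to the hardware.
 *	}
 */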

int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
				 int64_t completion_slot, int update)
{
	if (update) {
		if (ACCESS_ONCE(dma_queue->hw_complete_count) >
		    completion_slot)
			return 1;

		__gxio_dma_queue_update_credits(dma_queue);
	}

	return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
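
/*
 * A minimal polling sketch (hypothetical caller):
 *
 *	while (!__gxio_dma_queue_is_complete(&q, slot, 1))
 *		cpu_relax();
 */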