/*
 * Memory arbiter functions. Allocates bandwidth through the
 * arbiter and sets up arbiter breakpoints.
 *
 * The algorithm first assigns slots to the clients that have specified
 * bandwidth (e.g. ethernet) and then divides the remaining slots
 * among all the active clients.
 *
 * Copyright (c) 2004-2007 Axis Communications AB.
 */

#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/marb_defs.h>
#include <arbiter.h>
#include <hwregs/intr_vect.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <asm/irq_regs.h>

struct crisv32_watch_entry {
	unsigned long instance;
	watch_callback *cb;
	unsigned long start;
	unsigned long end;
	int used;
};

#define NUMBER_OF_BP 4
#define NBR_OF_CLIENTS 14
#define NBR_OF_SLOTS 64
#define SDRAM_BANDWIDTH 100000000	/* Some kind of expected value */
#define INTMEM_BANDWIDTH 400000000
#define NBR_OF_REGIONS 2

static struct crisv32_watch_entry watches[NUMBER_OF_BP] = {
	{regi_marb_bp0},
	{regi_marb_bp1},
	{regi_marb_bp2},
	{regi_marb_bp3}
};

static u8 requested_slots[NBR_OF_REGIONS][NBR_OF_CLIENTS];
static u8 active_clients[NBR_OF_REGIONS][NBR_OF_CLIENTS];
static int max_bandwidth[NBR_OF_REGIONS] =
    { SDRAM_BANDWIDTH, INTMEM_BANDWIDTH };

DEFINE_SPINLOCK(arbiter_lock);

static irqreturn_t crisv32_arbiter_irq(int irq, void *dev_id);
/*
 * "I'm the arbiter, I know the score.
 *  From square one I'll be watching all 64."
 * (memory arbiter slots, that is)
 *
 *  Or in other words:
 * Program the memory arbiter slots for "region" according to what's
 * in requested_slots[] and active_clients[], while minimizing
 * latency. A caller may pass a positive number for "unused_slots",
 * which must then be the number of remaining, unallocated slots,
 * free to hand out to any client. See the illustrative sketch after
 * the function body.
 */

static void crisv32_arbiter_config(int region, int unused_slots)
{
	int slot;
	int client;
	int interval = 0;

	/*
	 * This vector corresponds to the hardware arbiter slots (see
	 * the hardware documentation for semantics). We initialize
	 * each slot with a suitable sentinel value outside the valid
	 * range {0 .. NBR_OF_CLIENTS - 1} and then replace it with a
	 * client index. Then the vector is fed to the hardware.
	 */
	s8 val[NBR_OF_SLOTS];

	for (slot = 0; slot < NBR_OF_SLOTS; slot++)
		val[slot] = -1;

	for (client = 0; client < NBR_OF_CLIENTS; client++) {
		int pos;
		/* Allocate the requested non-zero number of slots, but
		 * also give clients with zero-requests one slot each
		 * while stocks last. We do the latter here, in client
		 * order. This makes sure zero-request clients are the
		 * first to get to any spare slots, else those slots
		 * could, when bandwidth is allocated close to the limit,
		 * all be allocated to low-index non-zero-request clients
		 * in the default-fill loop below. Another positive but
		 * secondary effect is a somewhat better spread of the
		 * zero-bandwidth clients in the vector, avoiding some of
		 * the latency that could otherwise be caused by the
		 * partitioning of non-zero-bandwidth clients at low
		 * indexes and zero-bandwidth clients at high
		 * indexes. (Note that this spreading can only affect the
		 * unallocated bandwidth.)  All the above only matters for
		 * memory-intensive situations, of course.
		 */
		if (!requested_slots[region][client]) {
			/*
			 * Skip inactive clients. Also skip zero-slot
			 * allocations in this pass when there are no known
			 * free slots.
			 */
			if (!active_clients[region][client] ||
			    unused_slots <= 0)
				continue;

			unused_slots--;

			/* Only allocate one slot for this client. */
			interval = NBR_OF_SLOTS;
		} else {
			interval =
			    NBR_OF_SLOTS / requested_slots[region][client];
		}

		pos = 0;
		while (pos < NBR_OF_SLOTS) {
			if (val[pos] >= 0) {
				pos++;
			} else {
				val[pos] = client;
				pos += interval;
			}
		}
	}

	client = 0;
	for (slot = 0; slot < NBR_OF_SLOTS; slot++) {
		/*
		 * Allocate remaining slots in round-robin
		 * client-number order for active clients. For this
		 * pass, we ignore requested bandwidth and previous
		 * allocations.
		 */
		if (val[slot] < 0) {
			int first = client;
			while (!active_clients[region][client]) {
				client = (client + 1) % NBR_OF_CLIENTS;
				if (client == first)
					break;
			}
			val[slot] = client;
			client = (client + 1) % NBR_OF_CLIENTS;
		}
		if (region == EXT_REGION)
			REG_WR_INT_VECT(marb, regi_marb, rw_ext_slots, slot,
					val[slot]);
		else if (region == INT_REGION)
			REG_WR_INT_VECT(marb, regi_marb, rw_int_slots, slot,
					val[slot]);
	}
}
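
/*
 * Illustration only, not called by the driver: the spreading scheme
 * above gives a client that was granted "req" of the 64 slots a
 * stride of NBR_OF_SLOTS / req, so its slots end up evenly spaced in
 * the vector, which bounds the worst-case wait until its next slot.
 * The helper name is hypothetical and exists only to show the math;
 * collisions with already-taken slots are resolved in the loop above
 * by sliding forward to the next free position.
 */
static inline int slot_stride_sketch(int req)
{
	/* E.g. req = 16 gives a stride of 4: slots 0, 4, 8, ... */
	return req > 0 ? NBR_OF_SLOTS / req : NBR_OF_SLOTS;
}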

extern char _stext, _etext;

static void crisv32_arbiter_init(void)
{
	static int initialized;

	if (initialized)
		return;

	initialized = 1;

	/*
	 * CPU caches (clients 10 and 11 in the external region) are
	 * always set to active, but with zero bandwidth allocated. It
	 * should be OK to allocate zero bandwidth for the caches,
	 * because DMA for other channels will supposedly finish, once
	 * their programmed amount is done, and then the caches will
	 * get access according to the "fixed scheme" for unclaimed
	 * slots. However, if some use-case requires a bounded CPU
	 * latency, e.g. for a particular interrupt, we would have to
	 * start allocating specific bandwidth for the CPU caches too.
	 */
	active_clients[EXT_REGION][10] = active_clients[EXT_REGION][11] = 1;
	crisv32_arbiter_config(EXT_REGION, 0);
	crisv32_arbiter_config(INT_REGION, 0);

	if (request_irq(MEMARB_INTR_VECT, crisv32_arbiter_irq, 0,
			"arbiter", NULL))
		printk(KERN_ERR "Couldn't allocate arbiter IRQ\n");

#ifndef CONFIG_ETRAX_KGDB
	/* Global watch for writes to kernel text segment. */
	crisv32_arbiter_watch(virt_to_phys(&_stext), &_etext - &_stext,
			      arbiter_all_clients, arbiter_all_write, NULL);
#endif
}

/* Main entry for bandwidth allocation. */

int crisv32_arbiter_allocate_bandwidth(int client, int region,
				       unsigned long bandwidth)
{
	int i;
	int total_assigned = 0;
	int total_clients = 0;
	int req;

	crisv32_arbiter_init();

	for (i = 0; i < NBR_OF_CLIENTS; i++) {
		total_assigned += requested_slots[region][i];
		total_clients += active_clients[region][i];
	}

	/*
	 * Avoid division by 0 for 0-bandwidth requests. Also reject
	 * requests beyond the region's total bandwidth, which would
	 * make max_bandwidth[region] / bandwidth zero and the slot
	 * calculation below divide by it.
	 */
	if (bandwidth == 0)
		req = 0;
	else if (bandwidth > max_bandwidth[region])
		return -EINVAL;
	else
		req = NBR_OF_SLOTS / (max_bandwidth[region] / bandwidth);

	/*
	 * We make sure that there are enough slots only for non-zero
	 * requests. Requesting 0 bandwidth *may* allocate slots,
	 * though if all bandwidth is allocated, such a client won't
	 * get any and will have to rely on getting memory access
	 * according to the fixed scheme that's the default when one
	 * of the slot-allocated clients doesn't claim its slot.
	 */
	if (total_assigned + req > NBR_OF_SLOTS)
		return -ENOMEM;

	active_clients[region][client] = 1;
	requested_slots[region][client] = req;
	crisv32_arbiter_config(region, NBR_OF_SLOTS - total_assigned);

	return 0;
}
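
/*
 * A minimal usage sketch, not part of the driver; the client index
 * and rate below are made up for illustration. With SDRAM_BANDWIDTH
 * = 100000000, a request for 25000000 maps to
 * NBR_OF_SLOTS / (100000000 / 25000000) = 64 / 4 = 16 of the 64 slots.
 */
static inline int arbiter_alloc_sketch(void)
{
	/* Hypothetical client index 3 on the external (SDRAM) region. */
	return crisv32_arbiter_allocate_bandwidth(3, EXT_REGION, 25000000);
}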

/*
 * Main entry for bandwidth deallocation.
 *
 * Strictly speaking, for a somewhat constant set of clients where
 * each client gets a constant bandwidth and is just enabled or
 * disabled (somewhat dynamically), no action is necessary here to
 * avoid starvation for non-zero-allocation clients, as the allocated
 * slots will just be unused. However, handing out those unused slots
 * to active clients avoids needless latency if the "fixed scheme"
 * would give unclaimed slots to an eager low-index client.
 */

void crisv32_arbiter_deallocate_bandwidth(int client, int region)
{
	int i;
	int total_assigned = 0;

	requested_slots[region][client] = 0;
	active_clients[region][client] = 0;

	for (i = 0; i < NBR_OF_CLIENTS; i++)
		total_assigned += requested_slots[region][i];

	crisv32_arbiter_config(region, NBR_OF_SLOTS - total_assigned);
}
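
/*
 * Companion sketch to the allocation example above (same made-up
 * client index, not part of the driver): releasing the bandwidth
 * reruns the slot configuration, so the freed slots are spread over
 * the remaining active clients instead of lying unused.
 */
static inline void arbiter_release_sketch(void)
{
	crisv32_arbiter_deallocate_bandwidth(3, EXT_REGION);
}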

int crisv32_arbiter_watch(unsigned long start, unsigned long size,
			  unsigned long clients, unsigned long accesses,
			  watch_callback *cb)
{
	int i;

	crisv32_arbiter_init();

	if (start > 0x80000000) {
		printk(KERN_ERR "Arbiter: %lX doesn't look like a "
			"physical address\n", start);
		return -EFAULT;
	}

	spin_lock(&arbiter_lock);

	for (i = 0; i < NUMBER_OF_BP; i++) {
		if (!watches[i].used) {
			reg_marb_rw_intr_mask intr_mask =
			    REG_RD(marb, regi_marb, rw_intr_mask);

			watches[i].used = 1;
			watches[i].start = start;
			watches[i].end = start + size;
			watches[i].cb = cb;

			REG_WR_INT(marb_bp, watches[i].instance, rw_first_addr,
				   watches[i].start);
			REG_WR_INT(marb_bp, watches[i].instance, rw_last_addr,
				   watches[i].end);
			REG_WR_INT(marb_bp, watches[i].instance, rw_op,
				   accesses);
			REG_WR_INT(marb_bp, watches[i].instance, rw_clients,
				   clients);

			if (i == 0)
				intr_mask.bp0 = regk_marb_yes;
			else if (i == 1)
				intr_mask.bp1 = regk_marb_yes;
			else if (i == 2)
				intr_mask.bp2 = regk_marb_yes;
			else if (i == 3)
				intr_mask.bp3 = regk_marb_yes;

			REG_WR(marb, regi_marb, rw_intr_mask, intr_mask);
			spin_unlock(&arbiter_lock);

			return i;
		}
	}
	spin_unlock(&arbiter_lock);
	return -ENOMEM;
}
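
/*
 * Usage sketch (hypothetical callback and region size, not part of
 * the driver): arm a breakpoint that fires when any client writes
 * anywhere in a 4 KiB region, using the arbiter_all_clients and
 * arbiter_all_write masks that the kernel-text watch above also
 * uses. The callback signature is assumed to be void (void), as
 * implied by the watch->cb() call in the IRQ handler below.
 */
static inline void watch_cb_sketch(void)
{
	printk(KERN_INFO "arbiter watch sketch: watched region written\n");
}

static inline int arbiter_watch_sketch(unsigned long phys_start)
{
	return crisv32_arbiter_watch(phys_start, 0x1000,
				     arbiter_all_clients, arbiter_all_write,
				     watch_cb_sketch);
}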

int crisv32_arbiter_unwatch(int id)
{
	reg_marb_rw_intr_mask intr_mask = REG_RD(marb, regi_marb, rw_intr_mask);

	crisv32_arbiter_init();

	spin_lock(&arbiter_lock);

	if ((id < 0) || (id >= NUMBER_OF_BP) || (!watches[id].used)) {
		spin_unlock(&arbiter_lock);
		return -EINVAL;
	}

	/*
	 * Clear the entry for reuse, but keep .instance: it holds the
	 * fixed register instance for this breakpoint and is needed
	 * if the entry is claimed again by crisv32_arbiter_watch().
	 */
	watches[id].used = 0;
	watches[id].cb = NULL;
	watches[id].start = 0;
	watches[id].end = 0;

	if (id == 0)
		intr_mask.bp0 = regk_marb_no;
	else if (id == 1)
		intr_mask.bp1 = regk_marb_no;
	else if (id == 2)
		intr_mask.bp2 = regk_marb_no;
	else if (id == 3)
		intr_mask.bp3 = regk_marb_no;

	REG_WR(marb, regi_marb, rw_intr_mask, intr_mask);

	spin_unlock(&arbiter_lock);
	return 0;
}
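
/*
 * Sketch of the full watch lifecycle (illustrative only): set the
 * hypothetical watch from the sketch above, then tear it down again
 * with the id that crisv32_arbiter_watch() returned.
 */
static inline void arbiter_watch_lifecycle_sketch(unsigned long phys_start)
{
	int id = arbiter_watch_sketch(phys_start);

	if (id >= 0)
		crisv32_arbiter_unwatch(id);
}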

extern void show_registers(struct pt_regs *regs);

static irqreturn_t crisv32_arbiter_irq(int irq, void *dev_id)
{
	reg_marb_r_masked_intr masked_intr =
	    REG_RD(marb, regi_marb, r_masked_intr);
	reg_marb_bp_r_brk_clients r_clients;
	reg_marb_bp_r_brk_addr r_addr;
	reg_marb_bp_r_brk_op r_op;
	reg_marb_bp_r_brk_first_client r_first;
	reg_marb_bp_r_brk_size r_size;
	reg_marb_bp_rw_ack ack = { 0 };
	reg_marb_rw_ack_intr ack_intr = {
		.bp0 = 1, .bp1 = 1, .bp2 = 1, .bp3 = 1
	};
	struct crisv32_watch_entry *watch;

	if (masked_intr.bp0) {
		watch = &watches[0];
		ack_intr.bp0 = regk_marb_yes;
	} else if (masked_intr.bp1) {
		watch = &watches[1];
		ack_intr.bp1 = regk_marb_yes;
	} else if (masked_intr.bp2) {
		watch = &watches[2];
		ack_intr.bp2 = regk_marb_yes;
	} else if (masked_intr.bp3) {
		watch = &watches[3];
		ack_intr.bp3 = regk_marb_yes;
	} else {
		return IRQ_NONE;
	}

	/* Retrieve all useful information and print it. */
	r_clients = REG_RD(marb_bp, watch->instance, r_brk_clients);
	r_addr = REG_RD(marb_bp, watch->instance, r_brk_addr);
	r_op = REG_RD(marb_bp, watch->instance, r_brk_op);
	r_first = REG_RD(marb_bp, watch->instance, r_brk_first_client);
	r_size = REG_RD(marb_bp, watch->instance, r_brk_size);

	printk(KERN_INFO "Arbiter IRQ\n");
	printk(KERN_INFO "Clients %X addr %X op %X first %X size %X\n",
	       REG_TYPE_CONV(int, reg_marb_bp_r_brk_clients, r_clients),
	       REG_TYPE_CONV(int, reg_marb_bp_r_brk_addr, r_addr),
	       REG_TYPE_CONV(int, reg_marb_bp_r_brk_op, r_op),
	       REG_TYPE_CONV(int, reg_marb_bp_r_brk_first_client, r_first),
	       REG_TYPE_CONV(int, reg_marb_bp_r_brk_size, r_size));

	REG_WR(marb_bp, watch->instance, rw_ack, ack);
	REG_WR(marb, regi_marb, rw_ack_intr, ack_intr);

	printk(KERN_INFO "IRQ occurred at %lX\n", get_irq_regs()->erp);

	if (watch->cb)
		watch->cb();

	return IRQ_HANDLED;
}