1 /*
2  * Support for the Tundra TSI148 VME-PCI Bridge Chip
3  *
4  * Author: Martyn Welch <martyn.welch@ge.com>
5  * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
6  *
7  * Based on work by Tom Armistead and Ajit Prem
8  * Copyright 2004 Motorola Inc.
9  *
10  * This program is free software; you can redistribute  it and/or modify it
11  * under  the terms of  the GNU General  Public License as published by the
12  * Free Software Foundation;  either version 2 of the  License, or (at your
13  * option) any later version.
14  */
15 
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/mm.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/proc_fs.h>
22 #include <linux/pci.h>
23 #include <linux/poll.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <linux/slab.h>
29 #include <linux/time.h>
30 #include <linux/io.h>
31 #include <linux/uaccess.h>
32 #include <linux/byteorder/generic.h>
33 #include <linux/vme.h>
34 
35 #include "../vme_bridge.h"
36 #include "vme_tsi148.h"
37 
38 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
39 static void tsi148_remove(struct pci_dev *);
40 
41 
42 /* Module parameters */
43 static bool err_chk;
44 static int geoid;
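/*
 * err_chk enables VME bus error checking on master reads and writes: each
 * access registers an error handler over the touched address range and the
 * VERR interrupt handler reports errors into it (see tsi148_master_read()
 * and tsi148_master_write() below).  geoid is assumed to override the
 * geographical slot ID used for CR/CSR addressing; that is an inference from
 * the parameter name rather than from the code below.
 */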
45 
46 static const char driver_name[] = "vme_tsi148";
47 
48 static const struct pci_device_id tsi148_ids[] = {
49 	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
50 	{ },
51 };
52 
53 static struct pci_driver tsi148_driver = {
54 	.name = driver_name,
55 	.id_table = tsi148_ids,
56 	.probe = tsi148_probe,
57 	.remove = tsi148_remove,
58 };
59 
60 static void reg_join(unsigned int high, unsigned int low,
61 	unsigned long long *variable)
62 {
63 	*variable = (unsigned long long)high << 32;
64 	*variable |= (unsigned long long)low;
65 }
66 
67 static void reg_split(unsigned long long variable, unsigned int *high,
68 	unsigned int *low)
69 {
70 	*low = (unsigned int)variable & 0xFFFFFFFF;
71 	*high = (unsigned int)(variable >> 32);
72 }
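
/*
 * The TSI148 exposes 64-bit quantities (VME addresses, bounds and offsets)
 * as upper/lower 32-bit register pairs; these helpers convert between the
 * two forms.  For example:
 *
 *	reg_split(0x123456789ULL, &high, &low);  gives high = 0x1, low = 0x23456789
 *	reg_join(high, low, &addr);              gives addr = 0x123456789ULL
 */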
73 
74 /*
75  * Wakes up DMA queue.
76  */
77 static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
78 	int channel_mask)
79 {
80 	u32 serviced = 0;
81 
82 	if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
83 		wake_up(&bridge->dma_queue[0]);
84 		serviced |= TSI148_LCSR_INTC_DMA0C;
85 	}
86 	if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
87 		wake_up(&bridge->dma_queue[1]);
88 		serviced |= TSI148_LCSR_INTC_DMA1C;
89 	}
90 
91 	return serviced;
92 }
93 
94 /*
95  * Call the attached location monitor callbacks.
96  */
97 static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
98 {
99 	int i;
100 	u32 serviced = 0;
101 
102 	for (i = 0; i < 4; i++) {
103 		if (stat & TSI148_LCSR_INTS_LMS[i]) {
104 			/* We only enable interrupts if the callback is set */
105 			bridge->lm_callback[i](i);
106 			serviced |= TSI148_LCSR_INTC_LMC[i];
107 		}
108 	}
109 
110 	return serviced;
111 }
112 
113 /*
114  * Handle VME mailbox interrupts.
115  *
116  * XXX This functionality is not exposed through the API.
117  */
118 static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
119 {
120 	int i;
121 	u32 val;
122 	u32 serviced = 0;
123 	struct tsi148_driver *bridge;
124 
125 	bridge = tsi148_bridge->driver_priv;
126 
127 	for (i = 0; i < 4; i++) {
128 		if (stat & TSI148_LCSR_INTS_MBS[i]) {
129 			val = ioread32be(bridge->base +	TSI148_GCSR_MBOX[i]);
130 			dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
131 				": 0x%x\n", i, val);
132 			serviced |= TSI148_LCSR_INTC_MBC[i];
133 		}
134 	}
135 
136 	return serviced;
137 }
138 
139 /*
140  * Display error & status message when PERR (PCI) exception interrupt occurs.
141  */
142 static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
143 {
144 	struct tsi148_driver *bridge;
145 
146 	bridge = tsi148_bridge->driver_priv;
147 
148 	dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
149 		"attributes: %08x\n",
150 		ioread32be(bridge->base + TSI148_LCSR_EDPAU),
151 		ioread32be(bridge->base + TSI148_LCSR_EDPAL),
152 		ioread32be(bridge->base + TSI148_LCSR_EDPAT));
153 
154 	dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
155 		"completion reg: %08x\n",
156 		ioread32be(bridge->base + TSI148_LCSR_EDPXA),
157 		ioread32be(bridge->base + TSI148_LCSR_EDPXS));
158 
159 	iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
160 
161 	return TSI148_LCSR_INTC_PERRC;
162 }
163 
164 /*
165  * Save address and status when VME error interrupt occurs.
166  */
167 static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
168 {
169 	unsigned int error_addr_high, error_addr_low;
170 	unsigned long long error_addr;
171 	u32 error_attrib;
172 	int error_am;
173 	struct tsi148_driver *bridge;
174 
175 	bridge = tsi148_bridge->driver_priv;
176 
177 	error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
178 	error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
179 	error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
180 	error_am = (error_attrib & TSI148_LCSR_VEAT_AM_M) >> 8;
181 
182 	reg_join(error_addr_high, error_addr_low, &error_addr);
183 
184 	/* Check for exception register overflow (we have lost error data) */
185 	if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
186 		dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
187 			"Occurred\n");
188 	}
189 
190 	if (err_chk)
191 		vme_bus_error_handler(tsi148_bridge, error_addr, error_am);
192 	else
193 		dev_err(tsi148_bridge->parent,
194 			"VME Bus Error at address: 0x%llx, attributes: %08x\n",
195 			error_addr, error_attrib);
196 
197 	/* Clear Status */
198 	iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
199 
200 	return TSI148_LCSR_INTC_VERRC;
201 }
202 
203 /*
204  * Wake up IACK queue.
205  */
206 static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
207 {
208 	wake_up(&bridge->iack_queue);
209 
210 	return TSI148_LCSR_INTC_IACKC;
211 }
212 
213 /*
214  * Call the VME bus interrupt callback if one is provided.
215  */
216 static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
217 	u32 stat)
218 {
219 	int vec, i, serviced = 0;
220 	struct tsi148_driver *bridge;
221 
222 	bridge = tsi148_bridge->driver_priv;
223 
224 	for (i = 7; i > 0; i--) {
225 		if (stat & (1 << i)) {
226 			/*
227 			 * Note: Even though the registers are defined as
228 			 * 32-bits in the spec, we only want to issue 8-bit
229 			 * IACK cycles on the bus, read from offset 3.
230 			 */
231 			vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
232 
233 			vme_irq_handler(tsi148_bridge, i, vec);
234 
235 			serviced |= (1 << i);
236 		}
237 	}
238 
239 	return serviced;
240 }
241 
242 /*
243  * Top level interrupt handler.  Calls the appropriate sub handler(s) for the
244  * active interrupt sources and then clears the serviced status bits.
245  */
246 static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
247 {
248 	u32 stat, enable, serviced = 0;
249 	struct vme_bridge *tsi148_bridge;
250 	struct tsi148_driver *bridge;
251 
252 	tsi148_bridge = ptr;
253 
254 	bridge = tsi148_bridge->driver_priv;
255 
256 	/* Determine which interrupts are unmasked and set */
257 	enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
258 	stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
259 
260 	/* Only look at unmasked interrupts */
261 	stat &= enable;
262 
263 	if (unlikely(!stat))
264 		return IRQ_NONE;
265 
266 	/* Call subhandlers as appropriate */
267 	/* DMA irqs */
268 	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
269 		serviced |= tsi148_DMA_irqhandler(bridge, stat);
270 
271 	/* Location monitor irqs */
272 	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
273 			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
274 		serviced |= tsi148_LM_irqhandler(bridge, stat);
275 
276 	/* Mail box irqs */
277 	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
278 			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
279 		serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
280 
281 	/* PCI bus error */
282 	if (stat & TSI148_LCSR_INTS_PERRS)
283 		serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
284 
285 	/* VME bus error */
286 	if (stat & TSI148_LCSR_INTS_VERRS)
287 		serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
288 
289 	/* IACK irq */
290 	if (stat & TSI148_LCSR_INTS_IACKS)
291 		serviced |= tsi148_IACK_irqhandler(bridge);
292 
293 	/* VME bus irqs */
294 	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
295 			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
296 			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
297 			TSI148_LCSR_INTS_IRQ1S))
298 		serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
299 
300 	/* Clear serviced interrupts */
301 	iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
302 
303 	return IRQ_HANDLED;
304 }
305 
306 static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
307 {
308 	int result;
309 	unsigned int tmp;
310 	struct pci_dev *pdev;
311 	struct tsi148_driver *bridge;
312 
313 	pdev = to_pci_dev(tsi148_bridge->parent);
314 
315 	bridge = tsi148_bridge->driver_priv;
316 
317 	INIT_LIST_HEAD(&tsi148_bridge->vme_error_handlers);
318 
319 	mutex_init(&tsi148_bridge->irq_mtx);
320 
321 	result = request_irq(pdev->irq,
322 			     tsi148_irqhandler,
323 			     IRQF_SHARED,
324 			     driver_name, tsi148_bridge);
325 	if (result) {
326 		dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
327 			"vector %02X\n", pdev->irq);
328 		return result;
329 	}
330 
331 	/* Enable and unmask interrupts */
332 	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
333 		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
334 		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
335 		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
336 		TSI148_LCSR_INTEO_IACKEO;
337 
338 	/* This leaves the following interrupts masked.
339 	 * TSI148_LCSR_INTEO_VIEEO
340 	 * TSI148_LCSR_INTEO_SYSFLEO
341 	 * TSI148_LCSR_INTEO_ACFLEO
342 	 */
343 
344 	/* Don't enable Location Monitor interrupts here - they will be
345 	 * enabled when the location monitors are properly configured and
346 	 * a callback has been attached.
347 	 * TSI148_LCSR_INTEO_LM0EO
348 	 * TSI148_LCSR_INTEO_LM1EO
349 	 * TSI148_LCSR_INTEO_LM2EO
350 	 * TSI148_LCSR_INTEO_LM3EO
351 	 */
352 
353 	/* Don't enable VME interrupts until we add a handler, else the board
354 	 * will respond to it and we don't want that unless it knows how to
355 	 * properly deal with it.
356 	 * TSI148_LCSR_INTEO_IRQ7EO
357 	 * TSI148_LCSR_INTEO_IRQ6EO
358 	 * TSI148_LCSR_INTEO_IRQ5EO
359 	 * TSI148_LCSR_INTEO_IRQ4EO
360 	 * TSI148_LCSR_INTEO_IRQ3EO
361 	 * TSI148_LCSR_INTEO_IRQ2EO
362 	 * TSI148_LCSR_INTEO_IRQ1EO
363 	 */
364 
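	/*
	 * INTEO selects which sources are routed to the interrupt output
	 * while INTEN enables the sources themselves; the driver simply
	 * programs both registers with the same mask.  (Register roles are
	 * inferred from their use in this file, not quoted from the TSI148
	 * manual.)
	 */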
365 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
366 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
367 
368 	return 0;
369 }
370 
371 static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
372 	struct pci_dev *pdev)
373 {
374 	struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
375 
376 	/* Turn off interrupts */
377 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
378 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
379 
380 	/* Clear all interrupts */
381 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
382 
383 	/* Detach interrupt handler */
384 	free_irq(pdev->irq, tsi148_bridge);
385 }
386 
387 /*
388  * Check to see if an IACK has been received, return true (1) or false (0).
389  */
390 static int tsi148_iack_received(struct tsi148_driver *bridge)
391 {
392 	u32 tmp;
393 
394 	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
395 
396 	if (tmp & TSI148_LCSR_VICR_IRQS)
397 		return 0;
398 	else
399 		return 1;
400 }
401 
402 /*
403  * Configure VME interrupt
404  */
405 static void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
406 	int state, int sync)
407 {
408 	struct pci_dev *pdev;
409 	u32 tmp;
410 	struct tsi148_driver *bridge;
411 
412 	bridge = tsi148_bridge->driver_priv;
413 
414 	/* We need to do the ordering differently for enabling and disabling */
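	/*
	 * When disabling, INTEN is cleared before INTEO; when enabling, INTEO
	 * is set before INTEN.  The apparent intent is that a level is never
	 * enabled in INTEN while its INTEO routing is clear - this is an
	 * interpretation of the code, not documented chip behaviour.
	 */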
415 	if (state == 0) {
416 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
417 		tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
418 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
419 
420 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
421 		tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
422 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
423 
424 		if (sync != 0) {
425 			pdev = to_pci_dev(tsi148_bridge->parent);
426 			synchronize_irq(pdev->irq);
427 		}
428 	} else {
429 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
430 		tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
431 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
432 
433 		tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
434 		tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
435 		iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
436 	}
437 }
438 
439 /*
440  * Generate a VME bus interrupt at the requested level & vector. Wait for
441  * interrupt to be acked.
442  */
443 static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
444 	int statid)
445 {
446 	u32 tmp;
447 	struct tsi148_driver *bridge;
448 
449 	bridge = tsi148_bridge->driver_priv;
450 
451 	mutex_lock(&bridge->vme_int);
452 
453 	/* Read VICR register */
454 	tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
455 
456 	/* Set Status/ID */
457 	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
458 		(statid & TSI148_LCSR_VICR_STID_M);
459 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
460 
461 	/* Assert VMEbus IRQ */
462 	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
463 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
464 
465 	/* XXX Consider implementing a timeout? */
466 	wait_event_interruptible(bridge->iack_queue,
467 		tsi148_iack_received(bridge));
468 
469 	mutex_unlock(&bridge->vme_int);
470 
471 	return 0;
472 }
473 
474 /*
475  * Initialize a slave window with the requested attributes.
476  */
477 static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
478 	unsigned long long vme_base, unsigned long long size,
479 	dma_addr_t pci_base, u32 aspace, u32 cycle)
480 {
481 	unsigned int i, addr = 0, granularity = 0;
482 	unsigned int temp_ctl = 0;
483 	unsigned int vme_base_low, vme_base_high;
484 	unsigned int vme_bound_low, vme_bound_high;
485 	unsigned int pci_offset_low, pci_offset_high;
486 	unsigned long long vme_bound, pci_offset;
487 	struct vme_bridge *tsi148_bridge;
488 	struct tsi148_driver *bridge;
489 
490 	tsi148_bridge = image->parent;
491 	bridge = tsi148_bridge->driver_priv;
492 
493 	i = image->number;
494 
495 	switch (aspace) {
496 	case VME_A16:
497 		granularity = 0x10;
498 		addr |= TSI148_LCSR_ITAT_AS_A16;
499 		break;
500 	case VME_A24:
501 		granularity = 0x1000;
502 		addr |= TSI148_LCSR_ITAT_AS_A24;
503 		break;
504 	case VME_A32:
505 		granularity = 0x10000;
506 		addr |= TSI148_LCSR_ITAT_AS_A32;
507 		break;
508 	case VME_A64:
509 		granularity = 0x10000;
510 		addr |= TSI148_LCSR_ITAT_AS_A64;
511 		break;
512 	default:
513 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
514 		return -EINVAL;
515 		break;
516 	}
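	/*
	 * The granularity selected above is effectively the minimum window
	 * size and the required alignment for base, bound and offset in the
	 * chosen address space; the checks below rely on this.
	 */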
517 
518 	/* Convert 64-bit variables to 2x 32-bit variables */
519 	reg_split(vme_base, &vme_base_high, &vme_base_low);
520 
521 	/*
522 	 * Bound address is a valid address for the window, adjust
523 	 * accordingly
524 	 */
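	/*
	 * Example: vme_base = 0x1000, size = 0x2000 and granularity = 0x1000
	 * (A24) give vme_bound = 0x2000 - the start of the last valid granule,
	 * not the first address beyond the window.
	 */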
525 	vme_bound = vme_base + size - granularity;
526 	reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
527 	pci_offset = (unsigned long long)pci_base - vme_base;
528 	reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
529 
530 	if (vme_base_low & (granularity - 1)) {
531 		dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
532 		return -EINVAL;
533 	}
534 	if (vme_bound_low & (granularity - 1)) {
535 		dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
536 		return -EINVAL;
537 	}
538 	if (pci_offset_low & (granularity - 1)) {
539 		dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
540 			"alignment\n");
541 		return -EINVAL;
542 	}
543 
544 	/*  Disable while we are mucking around */
545 	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
546 		TSI148_LCSR_OFFSET_ITAT);
547 	temp_ctl &= ~TSI148_LCSR_ITAT_EN;
548 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
549 		TSI148_LCSR_OFFSET_ITAT);
550 
551 	/* Setup mapping */
552 	iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
553 		TSI148_LCSR_OFFSET_ITSAU);
554 	iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
555 		TSI148_LCSR_OFFSET_ITSAL);
556 	iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
557 		TSI148_LCSR_OFFSET_ITEAU);
558 	iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
559 		TSI148_LCSR_OFFSET_ITEAL);
560 	iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
561 		TSI148_LCSR_OFFSET_ITOFU);
562 	iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
563 		TSI148_LCSR_OFFSET_ITOFL);
564 
565 	/* Setup 2eSST speeds */
566 	temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
567 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
568 	case VME_2eSST160:
569 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
570 		break;
571 	case VME_2eSST267:
572 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
573 		break;
574 	case VME_2eSST320:
575 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
576 		break;
577 	}
578 
579 	/* Setup cycle types */
580 	temp_ctl &= ~(0x1F << 7);
581 	if (cycle & VME_BLT)
582 		temp_ctl |= TSI148_LCSR_ITAT_BLT;
583 	if (cycle & VME_MBLT)
584 		temp_ctl |= TSI148_LCSR_ITAT_MBLT;
585 	if (cycle & VME_2eVME)
586 		temp_ctl |= TSI148_LCSR_ITAT_2eVME;
587 	if (cycle & VME_2eSST)
588 		temp_ctl |= TSI148_LCSR_ITAT_2eSST;
589 	if (cycle & VME_2eSSTB)
590 		temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
591 
592 	/* Setup address space */
593 	temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
594 	temp_ctl |= addr;
595 
596 	temp_ctl &= ~0xF;
597 	if (cycle & VME_SUPER)
598 		temp_ctl |= TSI148_LCSR_ITAT_SUPR;
599 	if (cycle & VME_USER)
600 		temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
601 	if (cycle & VME_PROG)
602 		temp_ctl |= TSI148_LCSR_ITAT_PGM;
603 	if (cycle & VME_DATA)
604 		temp_ctl |= TSI148_LCSR_ITAT_DATA;
605 
606 	/* Write ctl reg without enable */
607 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
608 		TSI148_LCSR_OFFSET_ITAT);
609 
610 	if (enabled)
611 		temp_ctl |= TSI148_LCSR_ITAT_EN;
612 
613 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
614 		TSI148_LCSR_OFFSET_ITAT);
615 
616 	return 0;
617 }
618 
619 /*
620  * Get slave window configuration.
621  */
622 static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
623 	unsigned long long *vme_base, unsigned long long *size,
624 	dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
625 {
626 	unsigned int i, granularity = 0, ctl = 0;
627 	unsigned int vme_base_low, vme_base_high;
628 	unsigned int vme_bound_low, vme_bound_high;
629 	unsigned int pci_offset_low, pci_offset_high;
630 	unsigned long long vme_bound, pci_offset;
631 	struct tsi148_driver *bridge;
632 
633 	bridge = image->parent->driver_priv;
634 
635 	i = image->number;
636 
637 	/* Read registers */
638 	ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
639 		TSI148_LCSR_OFFSET_ITAT);
640 
641 	vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
642 		TSI148_LCSR_OFFSET_ITSAU);
643 	vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
644 		TSI148_LCSR_OFFSET_ITSAL);
645 	vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
646 		TSI148_LCSR_OFFSET_ITEAU);
647 	vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
648 		TSI148_LCSR_OFFSET_ITEAL);
649 	pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
650 		TSI148_LCSR_OFFSET_ITOFU);
651 	pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
652 		TSI148_LCSR_OFFSET_ITOFL);
653 
654 	/* Convert 2x 32-bit register values to 64-bit variables */
655 	reg_join(vme_base_high, vme_base_low, vme_base);
656 	reg_join(vme_bound_high, vme_bound_low, &vme_bound);
657 	reg_join(pci_offset_high, pci_offset_low, &pci_offset);
658 
659 	*pci_base = (dma_addr_t)(*vme_base + pci_offset);
660 
661 	*enabled = 0;
662 	*aspace = 0;
663 	*cycle = 0;
664 
665 	if (ctl & TSI148_LCSR_ITAT_EN)
666 		*enabled = 1;
667 
668 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
669 		granularity = 0x10;
670 		*aspace |= VME_A16;
671 	}
672 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
673 		granularity = 0x1000;
674 		*aspace |= VME_A24;
675 	}
676 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
677 		granularity = 0x10000;
678 		*aspace |= VME_A32;
679 	}
680 	if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
681 		granularity = 0x10000;
682 		*aspace |= VME_A64;
683 	}
684 
685 	/* Need granularity before we set the size */
686 	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
687 
688 
689 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
690 		*cycle |= VME_2eSST160;
691 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
692 		*cycle |= VME_2eSST267;
693 	if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
694 		*cycle |= VME_2eSST320;
695 
696 	if (ctl & TSI148_LCSR_ITAT_BLT)
697 		*cycle |= VME_BLT;
698 	if (ctl & TSI148_LCSR_ITAT_MBLT)
699 		*cycle |= VME_MBLT;
700 	if (ctl & TSI148_LCSR_ITAT_2eVME)
701 		*cycle |= VME_2eVME;
702 	if (ctl & TSI148_LCSR_ITAT_2eSST)
703 		*cycle |= VME_2eSST;
704 	if (ctl & TSI148_LCSR_ITAT_2eSSTB)
705 		*cycle |= VME_2eSSTB;
706 
707 	if (ctl & TSI148_LCSR_ITAT_SUPR)
708 		*cycle |= VME_SUPER;
709 	if (ctl & TSI148_LCSR_ITAT_NPRIV)
710 		*cycle |= VME_USER;
711 	if (ctl & TSI148_LCSR_ITAT_PGM)
712 		*cycle |= VME_PROG;
713 	if (ctl & TSI148_LCSR_ITAT_DATA)
714 		*cycle |= VME_DATA;
715 
716 	return 0;
717 }
718 
719 /*
720  * Allocate and map PCI Resource
721  */
722 static int tsi148_alloc_resource(struct vme_master_resource *image,
723 	unsigned long long size)
724 {
725 	unsigned long long existing_size;
726 	int retval = 0;
727 	struct pci_dev *pdev;
728 	struct vme_bridge *tsi148_bridge;
729 
730 	tsi148_bridge = image->parent;
731 
732 	pdev = to_pci_dev(tsi148_bridge->parent);
733 
734 	existing_size = (unsigned long long)(image->bus_resource.end -
735 		image->bus_resource.start);
736 
737 	/* If the existing size is OK, return */
738 	if ((size != 0) && (existing_size == (size - 1)))
739 		return 0;
740 
741 	if (existing_size != 0) {
742 		iounmap(image->kern_base);
743 		image->kern_base = NULL;
744 		kfree(image->bus_resource.name);
745 		release_resource(&image->bus_resource);
746 		memset(&image->bus_resource, 0, sizeof(struct resource));
747 	}
748 
749 	/* Exit here if size is zero */
750 	if (size == 0)
751 		return 0;
752 
753 	if (image->bus_resource.name == NULL) {
754 		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
755 		if (image->bus_resource.name == NULL) {
756 			dev_err(tsi148_bridge->parent, "Unable to allocate "
757 				"memory for resource name\n");
758 			retval = -ENOMEM;
759 			goto err_name;
760 		}
761 	}
762 
763 	sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
764 		image->number);
765 
766 	image->bus_resource.start = 0;
767 	image->bus_resource.end = (unsigned long)size;
768 	image->bus_resource.flags = IORESOURCE_MEM;
769 
770 	retval = pci_bus_alloc_resource(pdev->bus,
771 		&image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
772 		0, NULL, NULL);
773 	if (retval) {
774 		dev_err(tsi148_bridge->parent, "Failed to allocate mem "
775 			"resource for window %d size 0x%lx start 0x%lx\n",
776 			image->number, (unsigned long)size,
777 			(unsigned long)image->bus_resource.start);
778 		goto err_resource;
779 	}
780 
781 	image->kern_base = ioremap_nocache(
782 		image->bus_resource.start, size);
783 	if (image->kern_base == NULL) {
784 		dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
785 		retval = -ENOMEM;
786 		goto err_remap;
787 	}
788 
789 	return 0;
790 
791 err_remap:
792 	release_resource(&image->bus_resource);
793 err_resource:
794 	kfree(image->bus_resource.name);
795 	memset(&image->bus_resource, 0, sizeof(struct resource));
796 err_name:
797 	return retval;
798 }
799 
800 /*
801  * Free and unmap PCI Resource
802  */
803 static void tsi148_free_resource(struct vme_master_resource *image)
804 {
805 	iounmap(image->kern_base);
806 	image->kern_base = NULL;
807 	release_resource(&image->bus_resource);
808 	kfree(image->bus_resource.name);
809 	memset(&image->bus_resource, 0, sizeof(struct resource));
810 }
811 
812 /*
813  * Set the attributes of an outbound window.
814  */
815 static int tsi148_master_set(struct vme_master_resource *image, int enabled,
816 	unsigned long long vme_base, unsigned long long size, u32 aspace,
817 	u32 cycle, u32 dwidth)
818 {
819 	int retval = 0;
820 	unsigned int i;
821 	unsigned int temp_ctl = 0;
822 	unsigned int pci_base_low, pci_base_high;
823 	unsigned int pci_bound_low, pci_bound_high;
824 	unsigned int vme_offset_low, vme_offset_high;
825 	unsigned long long pci_bound, vme_offset, pci_base;
826 	struct vme_bridge *tsi148_bridge;
827 	struct tsi148_driver *bridge;
828 	struct pci_bus_region region;
829 	struct pci_dev *pdev;
830 
831 	tsi148_bridge = image->parent;
832 
833 	bridge = tsi148_bridge->driver_priv;
834 
835 	pdev = to_pci_dev(tsi148_bridge->parent);
836 
837 	/* Verify input data */
838 	if (vme_base & 0xFFFF) {
839 		dev_err(tsi148_bridge->parent, "Invalid VME Window "
840 			"alignment\n");
841 		retval = -EINVAL;
842 		goto err_window;
843 	}
844 
845 	if ((size == 0) && (enabled != 0)) {
846 		dev_err(tsi148_bridge->parent, "Size must be non-zero for "
847 			"enabled windows\n");
848 		retval = -EINVAL;
849 		goto err_window;
850 	}
851 
852 	spin_lock(&image->lock);
853 
854 	/* Let's allocate the resource here rather than further up the stack as
855 	 * it avoids pushing loads of bus dependent stuff up the stack. If size
856 	 * is zero, any existing resource will be freed.
857 	 */
858 	retval = tsi148_alloc_resource(image, size);
859 	if (retval) {
860 		spin_unlock(&image->lock);
861 		dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
862 			"resource\n");
863 		goto err_res;
864 	}
865 
866 	if (size == 0) {
867 		pci_base = 0;
868 		pci_bound = 0;
869 		vme_offset = 0;
870 	} else {
871 		pcibios_resource_to_bus(pdev->bus, &region,
872 					&image->bus_resource);
873 		pci_base = region.start;
874 
875 		/*
876 		 * Bound address is a valid address for the window, adjust
877 		 * according to window granularity.
878 		 */
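		/*
		 * Outbound windows use a fixed 64 KiB (0x10000) granularity,
		 * hence the constant adjustment here and the 0xFFFF alignment
		 * checks on base, bound and offset.
		 */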
879 		pci_bound = pci_base + (size - 0x10000);
880 		vme_offset = vme_base - pci_base;
881 	}
882 
883 	/* Convert 64-bit variables to 2x 32-bit variables */
884 	reg_split(pci_base, &pci_base_high, &pci_base_low);
885 	reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
886 	reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
887 
888 	if (pci_base_low & 0xFFFF) {
889 		spin_unlock(&image->lock);
890 		dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
891 		retval = -EINVAL;
892 		goto err_gran;
893 	}
894 	if (pci_bound_low & 0xFFFF) {
895 		spin_unlock(&image->lock);
896 		dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
897 		retval = -EINVAL;
898 		goto err_gran;
899 	}
900 	if (vme_offset_low & 0xFFFF) {
901 		spin_unlock(&image->lock);
902 		dev_err(tsi148_bridge->parent, "Invalid VME Offset "
903 			"alignment\n");
904 		retval = -EINVAL;
905 		goto err_gran;
906 	}
907 
908 	i = image->number;
909 
910 	/* Disable while we are mucking around */
911 	temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
912 		TSI148_LCSR_OFFSET_OTAT);
913 	temp_ctl &= ~TSI148_LCSR_OTAT_EN;
914 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
915 		TSI148_LCSR_OFFSET_OTAT);
916 
917 	/* Setup 2eSST speeds */
918 	temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
919 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
920 	case VME_2eSST160:
921 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
922 		break;
923 	case VME_2eSST267:
924 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
925 		break;
926 	case VME_2eSST320:
927 		temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
928 		break;
929 	}
930 
931 	/* Setup cycle types */
932 	if (cycle & VME_BLT) {
933 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
934 		temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
935 	}
936 	if (cycle & VME_MBLT) {
937 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
938 		temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
939 	}
940 	if (cycle & VME_2eVME) {
941 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
942 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
943 	}
944 	if (cycle & VME_2eSST) {
945 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
946 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
947 	}
948 	if (cycle & VME_2eSSTB) {
949 		dev_warn(tsi148_bridge->parent, "Currently not setting "
950 			"Broadcast Select Registers\n");
951 		temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
952 		temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
953 	}
954 
955 	/* Setup data width */
956 	temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
957 	switch (dwidth) {
958 	case VME_D16:
959 		temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
960 		break;
961 	case VME_D32:
962 		temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
963 		break;
964 	default:
965 		spin_unlock(&image->lock);
966 		dev_err(tsi148_bridge->parent, "Invalid data width\n");
967 		retval = -EINVAL;
968 		goto err_dwidth;
969 	}
970 
971 	/* Setup address space */
972 	temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
973 	switch (aspace) {
974 	case VME_A16:
975 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
976 		break;
977 	case VME_A24:
978 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
979 		break;
980 	case VME_A32:
981 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
982 		break;
983 	case VME_A64:
984 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
985 		break;
986 	case VME_CRCSR:
987 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
988 		break;
989 	case VME_USER1:
990 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
991 		break;
992 	case VME_USER2:
993 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
994 		break;
995 	case VME_USER3:
996 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
997 		break;
998 	case VME_USER4:
999 		temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1000 		break;
1001 	default:
1002 		spin_unlock(&image->lock);
1003 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
1004 		retval = -EINVAL;
1005 		goto err_aspace;
1006 		break;
1007 	}
1008 
1009 	temp_ctl &= ~(3<<4);
1010 	if (cycle & VME_SUPER)
1011 		temp_ctl |= TSI148_LCSR_OTAT_SUP;
1012 	if (cycle & VME_PROG)
1013 		temp_ctl |= TSI148_LCSR_OTAT_PGM;
1014 
1015 	/* Setup mapping */
1016 	iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
1017 		TSI148_LCSR_OFFSET_OTSAU);
1018 	iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
1019 		TSI148_LCSR_OFFSET_OTSAL);
1020 	iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
1021 		TSI148_LCSR_OFFSET_OTEAU);
1022 	iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
1023 		TSI148_LCSR_OFFSET_OTEAL);
1024 	iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
1025 		TSI148_LCSR_OFFSET_OTOFU);
1026 	iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
1027 		TSI148_LCSR_OFFSET_OTOFL);
1028 
1029 	/* Write ctl reg without enable */
1030 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1031 		TSI148_LCSR_OFFSET_OTAT);
1032 
1033 	if (enabled)
1034 		temp_ctl |= TSI148_LCSR_OTAT_EN;
1035 
1036 	iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
1037 		TSI148_LCSR_OFFSET_OTAT);
1038 
1039 	spin_unlock(&image->lock);
1040 	return 0;
1041 
1042 err_aspace:
1043 err_dwidth:
1044 err_gran:
1045 	tsi148_free_resource(image);
1046 err_res:
1047 err_window:
1048 	return retval;
1049 
1050 }
1051 
1052 /*
1053  * Get the attributes of an outbound window.
1054  *
1055  * XXX Not parsing prefetch information.
1056  */
1057 static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
1058 	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1059 	u32 *cycle, u32 *dwidth)
1060 {
1061 	unsigned int i, ctl;
1062 	unsigned int pci_base_low, pci_base_high;
1063 	unsigned int pci_bound_low, pci_bound_high;
1064 	unsigned int vme_offset_low, vme_offset_high;
1065 
1066 	unsigned long long pci_base, pci_bound, vme_offset;
1067 	struct tsi148_driver *bridge;
1068 
1069 	bridge = image->parent->driver_priv;
1070 
1071 	i = image->number;
1072 
1073 	ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1074 		TSI148_LCSR_OFFSET_OTAT);
1075 
1076 	pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1077 		TSI148_LCSR_OFFSET_OTSAU);
1078 	pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1079 		TSI148_LCSR_OFFSET_OTSAL);
1080 	pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1081 		TSI148_LCSR_OFFSET_OTEAU);
1082 	pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1083 		TSI148_LCSR_OFFSET_OTEAL);
1084 	vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1085 		TSI148_LCSR_OFFSET_OTOFU);
1086 	vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1087 		TSI148_LCSR_OFFSET_OTOFL);
1088 
1089 	/* Convert 2x 32-bit register values to 64-bit variables */
1090 	reg_join(pci_base_high, pci_base_low, &pci_base);
1091 	reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1092 	reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1093 
1094 	*vme_base = pci_base + vme_offset;
1095 	*size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1096 
1097 	*enabled = 0;
1098 	*aspace = 0;
1099 	*cycle = 0;
1100 	*dwidth = 0;
1101 
1102 	if (ctl & TSI148_LCSR_OTAT_EN)
1103 		*enabled = 1;
1104 
1105 	/* Setup address space */
1106 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1107 		*aspace |= VME_A16;
1108 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1109 		*aspace |= VME_A24;
1110 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1111 		*aspace |= VME_A32;
1112 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1113 		*aspace |= VME_A64;
1114 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1115 		*aspace |= VME_CRCSR;
1116 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1117 		*aspace |= VME_USER1;
1118 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1119 		*aspace |= VME_USER2;
1120 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1121 		*aspace |= VME_USER3;
1122 	if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1123 		*aspace |= VME_USER4;
1124 
1125 	/* Setup 2eSST speeds */
1126 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1127 		*cycle |= VME_2eSST160;
1128 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1129 		*cycle |= VME_2eSST267;
1130 	if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1131 		*cycle |= VME_2eSST320;
1132 
1133 	/* Setup cycle types */
1134 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
1135 		*cycle |= VME_SCT;
1136 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
1137 		*cycle |= VME_BLT;
1138 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
1139 		*cycle |= VME_MBLT;
1140 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
1141 		*cycle |= VME_2eVME;
1142 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
1143 		*cycle |= VME_2eSST;
1144 	if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
1145 		*cycle |= VME_2eSSTB;
1146 
1147 	if (ctl & TSI148_LCSR_OTAT_SUP)
1148 		*cycle |= VME_SUPER;
1149 	else
1150 		*cycle |= VME_USER;
1151 
1152 	if (ctl & TSI148_LCSR_OTAT_PGM)
1153 		*cycle |= VME_PROG;
1154 	else
1155 		*cycle |= VME_DATA;
1156 
1157 	/* Setup data width */
1158 	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1159 		*dwidth = VME_D16;
1160 	if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1161 		*dwidth = VME_D32;
1162 
1163 	return 0;
1164 }
1165 
1166 
1167 static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
1168 	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
1169 	u32 *cycle, u32 *dwidth)
1170 {
1171 	int retval;
1172 
1173 	spin_lock(&image->lock);
1174 
1175 	retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1176 		cycle, dwidth);
1177 
1178 	spin_unlock(&image->lock);
1179 
1180 	return retval;
1181 }
1182 
1183 static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1184 	size_t count, loff_t offset)
1185 {
1186 	int retval, enabled;
1187 	unsigned long long vme_base, size;
1188 	u32 aspace, cycle, dwidth;
1189 	struct vme_error_handler *handler = NULL;
1190 	struct vme_bridge *tsi148_bridge;
1191 	void __iomem *addr = image->kern_base + offset;
1192 	unsigned int done = 0;
1193 	unsigned int count32;
1194 
1195 	tsi148_bridge = image->parent;
1196 
1197 	spin_lock(&image->lock);
1198 
1199 	if (err_chk) {
1200 		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1201 				    &cycle, &dwidth);
1202 		handler = vme_register_error_handler(tsi148_bridge, aspace,
1203 						     vme_base + offset, count);
1204 		if (!handler) {
1205 			spin_unlock(&image->lock);
1206 			return -ENOMEM;
1207 		}
1208 	}
1209 
1210 	/* The following code handles VME address alignment. We cannot use
1211 	 * memcpy_xxx here because it may cut data transfers into 8-bit
1212 	 * cycles when D16 or D32 cycles are required on the VME bus.
1213 	 * On the other hand, the bridge itself assures that the maximum data
1214 	 * cycle configured for the transfer is used and splits it
1215 	 * automatically for non-aligned addresses, so we don't want the
1216 	 * overhead of needlessly forcing small transfers for the entire cycle.
1217 	 */
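	/*
	 * For example, an addr ending in ...01 with count = 7 results in one
	 * 8-bit, one 16-bit and one 32-bit read, keeping every access
	 * naturally aligned.
	 */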
1218 	if ((uintptr_t)addr & 0x1) {
1219 		*(u8 *)buf = ioread8(addr);
1220 		done += 1;
1221 		if (done == count)
1222 			goto out;
1223 	}
1224 	if ((uintptr_t)(addr + done) & 0x2) {
1225 		if ((count - done) < 2) {
1226 			*(u8 *)(buf + done) = ioread8(addr + done);
1227 			done += 1;
1228 			goto out;
1229 		} else {
1230 			*(u16 *)(buf + done) = ioread16(addr + done);
1231 			done += 2;
1232 		}
1233 	}
1234 
1235 	count32 = (count - done) & ~0x3;
1236 	while (done < count32) {
1237 		*(u32 *)(buf + done) = ioread32(addr + done);
1238 		done += 4;
1239 	}
1240 
1241 	if ((count - done) & 0x2) {
1242 		*(u16 *)(buf + done) = ioread16(addr + done);
1243 		done += 2;
1244 	}
1245 	if ((count - done) & 0x1) {
1246 		*(u8 *)(buf + done) = ioread8(addr + done);
1247 		done += 1;
1248 	}
1249 
1250 out:
1251 	retval = count;
1252 
1253 	if (err_chk) {
1254 		if (handler->num_errors) {
1255 			dev_err(image->parent->parent,
1256 				"First VME read error detected at address 0x%llx\n",
1257 				handler->first_error);
1258 			retval = handler->first_error - (vme_base + offset);
1259 		}
1260 		vme_unregister_error_handler(handler);
1261 	}
1262 
1263 	spin_unlock(&image->lock);
1264 
1265 	return retval;
1266 }
1267 
1268 
1269 static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1270 	size_t count, loff_t offset)
1271 {
1272 	int retval = 0, enabled;
1273 	unsigned long long vme_base, size;
1274 	u32 aspace, cycle, dwidth;
1275 	void __iomem *addr = image->kern_base + offset;
1276 	unsigned int done = 0;
1277 	unsigned int count32;
1278 
1279 	struct vme_error_handler *handler = NULL;
1280 	struct vme_bridge *tsi148_bridge;
1281 	struct tsi148_driver *bridge;
1282 
1283 	tsi148_bridge = image->parent;
1284 
1285 	bridge = tsi148_bridge->driver_priv;
1286 
1287 	spin_lock(&image->lock);
1288 
1289 	if (err_chk) {
1290 		__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace,
1291 				    &cycle, &dwidth);
1292 		handler = vme_register_error_handler(tsi148_bridge, aspace,
1293 						     vme_base + offset, count);
1294 		if (!handler) {
1295 			spin_unlock(&image->lock);
1296 			return -ENOMEM;
1297 		}
1298 	}
1299 
1300 	/* Apply the same alignment strategy as in tsi148_master_read to
1301 	 * ensure the correct cycle widths are used on the VME bus.
1302 	 */
1303 	if ((uintptr_t)addr & 0x1) {
1304 		iowrite8(*(u8 *)buf, addr);
1305 		done += 1;
1306 		if (done == count)
1307 			goto out;
1308 	}
1309 	if ((uintptr_t)(addr + done) & 0x2) {
1310 		if ((count - done) < 2) {
1311 			iowrite8(*(u8 *)(buf + done), addr + done);
1312 			done += 1;
1313 			goto out;
1314 		} else {
1315 			iowrite16(*(u16 *)(buf + done), addr + done);
1316 			done += 2;
1317 		}
1318 	}
1319 
1320 	count32 = (count - done) & ~0x3;
1321 	while (done < count32) {
1322 		iowrite32(*(u32 *)(buf + done), addr + done);
1323 		done += 4;
1324 	}
1325 
1326 	if ((count - done) & 0x2) {
1327 		iowrite16(*(u16 *)(buf + done), addr + done);
1328 		done += 2;
1329 	}
1330 	if ((count - done) & 0x1) {
1331 		iowrite8(*(u8 *)(buf + done), addr + done);
1332 		done += 1;
1333 	}
1334 
1335 out:
1336 	retval = count;
1337 
1338 	/*
1339 	 * Writes are posted. We need to do a read on the VME bus to flush out
1340 	 * all of the writes before we check for errors. We can't guarantee
1341 	 * that reading the data we have just written is safe. It is believed
1342 	 * that there isn't any read, write re-ordering, so we can read any
1343 	 * location in VME space, so let's read the Device ID from the tsi148's
1344 	 * own registers as mapped into CR/CSR space.
1345 	 *
1346 	 * We check for saved errors in the written address range/space.
1347 	 */
1348 
1349 	if (err_chk) {
1350 		ioread16(bridge->flush_image->kern_base + 0x7F000);
1351 
1352 		if (handler->num_errors) {
1353 			dev_warn(tsi148_bridge->parent,
1354 				 "First VME write error detected at address 0x%llx\n",
1355 				 handler->first_error);
1356 			retval = handler->first_error - (vme_base + offset);
1357 		}
1358 		vme_unregister_error_handler(handler);
1359 	}
1360 
1361 	spin_unlock(&image->lock);
1362 
1363 	return retval;
1364 }
1365 
1366 /*
1367  * Perform an RMW cycle on the VME bus.
1368  *
1369  * Requires a previously configured master window, returns final value.
1370  */
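/*
 * The mask, compare and swap values are written to RMWEN, RMWC and RMWS and
 * the cycle is triggered by the read through the outbound window below.  The
 * presumed semantics - bits selected by RMWEN are compared against RMWC and,
 * on a match, replaced with RMWS - are an assumption; consult the TSI148
 * manual for the authoritative description.
 */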
1371 static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1372 	unsigned int mask, unsigned int compare, unsigned int swap,
1373 	loff_t offset)
1374 {
1375 	unsigned long long pci_addr;
1376 	unsigned int pci_addr_high, pci_addr_low;
1377 	u32 tmp, result;
1378 	int i;
1379 	struct tsi148_driver *bridge;
1380 
1381 	bridge = image->parent->driver_priv;
1382 
1383 	/* Find the PCI address that maps to the desired VME address */
1384 	i = image->number;
1385 
1386 	/* Locking as we can only do one of these at a time */
1387 	mutex_lock(&bridge->vme_rmw);
1388 
1389 	/* Lock image */
1390 	spin_lock(&image->lock);
1391 
1392 	pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1393 		TSI148_LCSR_OFFSET_OTSAU);
1394 	pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
1395 		TSI148_LCSR_OFFSET_OTSAL);
1396 
1397 	reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1398 	reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1399 
1400 	/* Configure registers */
1401 	iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
1402 	iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
1403 	iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
1404 	iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
1405 	iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
1406 
1407 	/* Enable RMW */
1408 	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1409 	tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1410 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1411 
1412 	/* Kick process off with a read to the required address. */
1413 	result = ioread32be(image->kern_base + offset);
1414 
1415 	/* Disable RMW */
1416 	tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
1417 	tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1418 	iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
1419 
1420 	spin_unlock(&image->lock);
1421 
1422 	mutex_unlock(&bridge->vme_rmw);
1423 
1424 	return result;
1425 }
1426 
1427 static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr,
1428 	u32 aspace, u32 cycle, u32 dwidth)
1429 {
1430 	u32 val;
1431 
1432 	val = be32_to_cpu(*attr);
1433 
1434 	/* Setup 2eSST speeds */
1435 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1436 	case VME_2eSST160:
1437 		val |= TSI148_LCSR_DSAT_2eSSTM_160;
1438 		break;
1439 	case VME_2eSST267:
1440 		val |= TSI148_LCSR_DSAT_2eSSTM_267;
1441 		break;
1442 	case VME_2eSST320:
1443 		val |= TSI148_LCSR_DSAT_2eSSTM_320;
1444 		break;
1445 	}
1446 
1447 	/* Setup cycle types */
1448 	if (cycle & VME_SCT)
1449 		val |= TSI148_LCSR_DSAT_TM_SCT;
1450 
1451 	if (cycle & VME_BLT)
1452 		val |= TSI148_LCSR_DSAT_TM_BLT;
1453 
1454 	if (cycle & VME_MBLT)
1455 		val |= TSI148_LCSR_DSAT_TM_MBLT;
1456 
1457 	if (cycle & VME_2eVME)
1458 		val |= TSI148_LCSR_DSAT_TM_2eVME;
1459 
1460 	if (cycle & VME_2eSST)
1461 		val |= TSI148_LCSR_DSAT_TM_2eSST;
1462 
1463 	if (cycle & VME_2eSSTB) {
1464 		dev_err(dev, "Currently not setting Broadcast Select "
1465 			"Registers\n");
1466 		val |= TSI148_LCSR_DSAT_TM_2eSSTB;
1467 	}
1468 
1469 	/* Setup data width */
1470 	switch (dwidth) {
1471 	case VME_D16:
1472 		val |= TSI148_LCSR_DSAT_DBW_16;
1473 		break;
1474 	case VME_D32:
1475 		val |= TSI148_LCSR_DSAT_DBW_32;
1476 		break;
1477 	default:
1478 		dev_err(dev, "Invalid data width\n");
1479 		return -EINVAL;
1480 	}
1481 
1482 	/* Setup address space */
1483 	switch (aspace) {
1484 	case VME_A16:
1485 		val |= TSI148_LCSR_DSAT_AMODE_A16;
1486 		break;
1487 	case VME_A24:
1488 		val |= TSI148_LCSR_DSAT_AMODE_A24;
1489 		break;
1490 	case VME_A32:
1491 		val |= TSI148_LCSR_DSAT_AMODE_A32;
1492 		break;
1493 	case VME_A64:
1494 		val |= TSI148_LCSR_DSAT_AMODE_A64;
1495 		break;
1496 	case VME_CRCSR:
1497 		val |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1498 		break;
1499 	case VME_USER1:
1500 		val |= TSI148_LCSR_DSAT_AMODE_USER1;
1501 		break;
1502 	case VME_USER2:
1503 		val |= TSI148_LCSR_DSAT_AMODE_USER2;
1504 		break;
1505 	case VME_USER3:
1506 		val |= TSI148_LCSR_DSAT_AMODE_USER3;
1507 		break;
1508 	case VME_USER4:
1509 		val |= TSI148_LCSR_DSAT_AMODE_USER4;
1510 		break;
1511 	default:
1512 		dev_err(dev, "Invalid address space\n");
1513 		return -EINVAL;
1514 		break;
1515 	}
1516 
1517 	if (cycle & VME_SUPER)
1518 		val |= TSI148_LCSR_DSAT_SUP;
1519 	if (cycle & VME_PROG)
1520 		val |= TSI148_LCSR_DSAT_PGM;
1521 
1522 	*attr = cpu_to_be32(val);
1523 
1524 	return 0;
1525 }
1526 
1527 static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr,
1528 	u32 aspace, u32 cycle, u32 dwidth)
1529 {
1530 	u32 val;
1531 
1532 	val = be32_to_cpu(*attr);
1533 
1534 	/* Setup 2eSST speeds */
1535 	switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1536 	case VME_2eSST160:
1537 		val |= TSI148_LCSR_DDAT_2eSSTM_160;
1538 		break;
1539 	case VME_2eSST267:
1540 		val |= TSI148_LCSR_DDAT_2eSSTM_267;
1541 		break;
1542 	case VME_2eSST320:
1543 		val |= TSI148_LCSR_DDAT_2eSSTM_320;
1544 		break;
1545 	}
1546 
1547 	/* Setup cycle types */
1548 	if (cycle & VME_SCT)
1549 		val |= TSI148_LCSR_DDAT_TM_SCT;
1550 
1551 	if (cycle & VME_BLT)
1552 		val |= TSI148_LCSR_DDAT_TM_BLT;
1553 
1554 	if (cycle & VME_MBLT)
1555 		val |= TSI148_LCSR_DDAT_TM_MBLT;
1556 
1557 	if (cycle & VME_2eVME)
1558 		val |= TSI148_LCSR_DDAT_TM_2eVME;
1559 
1560 	if (cycle & VME_2eSST)
1561 		val |= TSI148_LCSR_DDAT_TM_2eSST;
1562 
1563 	if (cycle & VME_2eSSTB) {
1564 		dev_err(dev, "Currently not setting Broadcast Select "
1565 			"Registers\n");
1566 		val |= TSI148_LCSR_DDAT_TM_2eSSTB;
1567 	}
1568 
1569 	/* Setup data width */
1570 	switch (dwidth) {
1571 	case VME_D16:
1572 		val |= TSI148_LCSR_DDAT_DBW_16;
1573 		break;
1574 	case VME_D32:
1575 		val |= TSI148_LCSR_DDAT_DBW_32;
1576 		break;
1577 	default:
1578 		dev_err(dev, "Invalid data width\n");
1579 		return -EINVAL;
1580 	}
1581 
1582 	/* Setup address space */
1583 	switch (aspace) {
1584 	case VME_A16:
1585 		val |= TSI148_LCSR_DDAT_AMODE_A16;
1586 		break;
1587 	case VME_A24:
1588 		val |= TSI148_LCSR_DDAT_AMODE_A24;
1589 		break;
1590 	case VME_A32:
1591 		val |= TSI148_LCSR_DDAT_AMODE_A32;
1592 		break;
1593 	case VME_A64:
1594 		val |= TSI148_LCSR_DDAT_AMODE_A64;
1595 		break;
1596 	case VME_CRCSR:
1597 		val |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1598 		break;
1599 	case VME_USER1:
1600 		val |= TSI148_LCSR_DDAT_AMODE_USER1;
1601 		break;
1602 	case VME_USER2:
1603 		val |= TSI148_LCSR_DDAT_AMODE_USER2;
1604 		break;
1605 	case VME_USER3:
1606 		val |= TSI148_LCSR_DDAT_AMODE_USER3;
1607 		break;
1608 	case VME_USER4:
1609 		val |= TSI148_LCSR_DDAT_AMODE_USER4;
1610 		break;
1611 	default:
1612 		dev_err(dev, "Invalid address space\n");
1613 		return -EINVAL;
1614 		break;
1615 	}
1616 
1617 	if (cycle & VME_SUPER)
1618 		val |= TSI148_LCSR_DDAT_SUP;
1619 	if (cycle & VME_PROG)
1620 		val |= TSI148_LCSR_DDAT_PGM;
1621 
1622 	*attr = cpu_to_be32(val);
1623 
1624 	return 0;
1625 }
1626 
1627 /*
1628  * Add a link list descriptor to the list
1629  *
1630  * Note: DMA engine expects the DMA descriptor to be big endian.
1631  */
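/*
 * Each descriptor holds source (dsau/dsal/dsat), destination (ddau/ddal/ddat)
 * and count (dcnt) fields plus a next-link address (dnlau/dnlal).  A new
 * entry is initialised as the last link (TSI148_LCSR_DNLAL_LLA) and the
 * previous entry's next pointer is patched once the new descriptor has been
 * mapped for DMA.
 */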
1632 static int tsi148_dma_list_add(struct vme_dma_list *list,
1633 	struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1634 {
1635 	struct tsi148_dma_entry *entry, *prev;
1636 	u32 address_high, address_low, val;
1637 	struct vme_dma_pattern *pattern_attr;
1638 	struct vme_dma_pci *pci_attr;
1639 	struct vme_dma_vme *vme_attr;
1640 	int retval = 0;
1641 	struct vme_bridge *tsi148_bridge;
1642 
1643 	tsi148_bridge = list->parent->parent;
1644 
1645 	/* Descriptor must be aligned on 64-bit boundaries */
1646 	entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1647 	if (entry == NULL) {
1648 		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
1649 			"dma resource structure\n");
1650 		retval = -ENOMEM;
1651 		goto err_mem;
1652 	}
1653 
1654 	/* Test descriptor alignment */
1655 	if ((unsigned long)&entry->descriptor & 0x7) {
1656 		dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
1657 			"byte boundary as required: %p\n",
1658 			&entry->descriptor);
1659 		retval = -EINVAL;
1660 		goto err_align;
1661 	}
1662 
1663 	/* Given we are going to fill out the structure, we probably don't
1664 	 * need to zero it, but better safe than sorry for now.
1665 	 */
1666 	memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
1667 
1668 	/* Fill out source part */
1669 	switch (src->type) {
1670 	case VME_DMA_PATTERN:
1671 		pattern_attr = src->private;
1672 
1673 		entry->descriptor.dsal = cpu_to_be32(pattern_attr->pattern);
1674 
1675 		val = TSI148_LCSR_DSAT_TYP_PAT;
1676 
1677 		/* Default behaviour is 32 bit pattern */
1678 		if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
1679 			val |= TSI148_LCSR_DSAT_PSZ;
1680 
1681 		/* It seems that the default behaviour is to increment */
1682 		if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
1683 			val |= TSI148_LCSR_DSAT_NIN;
1684 		entry->descriptor.dsat = cpu_to_be32(val);
1685 		break;
1686 	case VME_DMA_PCI:
1687 		pci_attr = src->private;
1688 
1689 		reg_split((unsigned long long)pci_attr->address, &address_high,
1690 			&address_low);
1691 		entry->descriptor.dsau = cpu_to_be32(address_high);
1692 		entry->descriptor.dsal = cpu_to_be32(address_low);
1693 		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_PCI);
1694 		break;
1695 	case VME_DMA_VME:
1696 		vme_attr = src->private;
1697 
1698 		reg_split((unsigned long long)vme_attr->address, &address_high,
1699 			&address_low);
1700 		entry->descriptor.dsau = cpu_to_be32(address_high);
1701 		entry->descriptor.dsal = cpu_to_be32(address_low);
1702 		entry->descriptor.dsat = cpu_to_be32(TSI148_LCSR_DSAT_TYP_VME);
1703 
1704 		retval = tsi148_dma_set_vme_src_attributes(
1705 			tsi148_bridge->parent, &entry->descriptor.dsat,
1706 			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1707 		if (retval < 0)
1708 			goto err_source;
1709 		break;
1710 	default:
1711 		dev_err(tsi148_bridge->parent, "Invalid source type\n");
1712 		retval = -EINVAL;
1713 		goto err_source;
1714 		break;
1715 	}
1716 
1717 	/* Assume last link - this will be over-written by adding another */
1718 	entry->descriptor.dnlau = cpu_to_be32(0);
1719 	entry->descriptor.dnlal = cpu_to_be32(TSI148_LCSR_DNLAL_LLA);
1720 
1721 	/* Fill out destination part */
1722 	switch (dest->type) {
1723 	case VME_DMA_PCI:
1724 		pci_attr = dest->private;
1725 
1726 		reg_split((unsigned long long)pci_attr->address, &address_high,
1727 			&address_low);
1728 		entry->descriptor.ddau = cpu_to_be32(address_high);
1729 		entry->descriptor.ddal = cpu_to_be32(address_low);
1730 		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_PCI);
1731 		break;
1732 	case VME_DMA_VME:
1733 		vme_attr = dest->private;
1734 
1735 		reg_split((unsigned long long)vme_attr->address, &address_high,
1736 			&address_low);
1737 		entry->descriptor.ddau = cpu_to_be32(address_high);
1738 		entry->descriptor.ddal = cpu_to_be32(address_low);
1739 		entry->descriptor.ddat = cpu_to_be32(TSI148_LCSR_DDAT_TYP_VME);
1740 
1741 		retval = tsi148_dma_set_vme_dest_attributes(
1742 			tsi148_bridge->parent, &entry->descriptor.ddat,
1743 			vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
1744 		if (retval < 0)
1745 			goto err_dest;
1746 		break;
1747 	default:
1748 		dev_err(tsi148_bridge->parent, "Invalid destination type\n");
1749 		retval = -EINVAL;
1750 		goto err_dest;
1751 		break;
1752 	}
1753 
1754 	/* Fill out count */
1755 	entry->descriptor.dcnt = cpu_to_be32((u32)count);
1756 
1757 	/* Add to list */
1758 	list_add_tail(&entry->list, &list->entries);
1759 
1760 	entry->dma_handle = dma_map_single(tsi148_bridge->parent,
1761 		&entry->descriptor,
1762 		sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1763 	if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
1764 		dev_err(tsi148_bridge->parent, "DMA mapping error\n");
1765 		retval = -EINVAL;
1766 		goto err_dma;
1767 	}
1768 
1769 	/* Fill out previous descriptors "Next Address" */
1770 	if (entry->list.prev != &list->entries) {
1771 		reg_split((unsigned long long)entry->dma_handle, &address_high,
1772 			&address_low);
1773 		prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1774 				  list);
1775 		prev->descriptor.dnlau = cpu_to_be32(address_high);
1776 		prev->descriptor.dnlal = cpu_to_be32(address_low);
1777 
1778 	}
1779 
1780 	return 0;
1781 
1782 err_dma:
1783 err_dest:
1784 err_source:
1785 err_align:
1786 	kfree(entry);
1787 err_mem:
1788 	return retval;
1789 }
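
/*
 * Illustrative sketch only (not part of the driver): a VME device driver
 * would normally reach tsi148_dma_list_add() through the bridge-agnostic
 * helpers exported by the VME core rather than calling it directly. The
 * helper names below are assumed from the VME core API; "vdev", the bus
 * address and the VME address are placeholders.
 *
 *	struct vme_resource *res = vme_dma_request(vdev, VME_DMA_MEM_TO_VME);
 *	struct vme_dma_list *list = vme_new_dma_list(res);
 *	struct vme_dma_attr *src = vme_dma_pci_attribute(bus_addr);
 *	struct vme_dma_attr *dst = vme_dma_vme_attribute(0x20000000, VME_A32,
 *							 VME_SCT, VME_D32);
 *
 *	vme_dma_list_add(list, src, dst, 0x1000);
 */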
1790 
1791 /*
1792  * Check to see if the provided DMA channel is busy. Returns 0 while busy, 1 when done.
1793  */
1794 static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
1795 {
1796 	u32 tmp;
1797 	struct tsi148_driver *bridge;
1798 
1799 	bridge = tsi148_bridge->driver_priv;
1800 
1801 	tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1802 		TSI148_LCSR_OFFSET_DSTA);
1803 
1804 	if (tmp & TSI148_LCSR_DSTA_BSY)
1805 		return 0;
1806 	else
1807 		return 1;
1808 
1809 }
1810 
1811 /*
1812  * Execute a previously generated link list
1813  *
1814  * XXX Need to provide control register configuration.
1815  */
1816 static int tsi148_dma_list_exec(struct vme_dma_list *list)
1817 {
1818 	struct vme_dma_resource *ctrlr;
1819 	int channel, retval;
1820 	struct tsi148_dma_entry *entry;
1821 	u32 bus_addr_high, bus_addr_low;
1822 	u32 val, dctlreg = 0;
1823 	struct vme_bridge *tsi148_bridge;
1824 	struct tsi148_driver *bridge;
1825 
1826 	ctrlr = list->parent;
1827 
1828 	tsi148_bridge = ctrlr->parent;
1829 
1830 	bridge = tsi148_bridge->driver_priv;
1831 
1832 	mutex_lock(&ctrlr->mtx);
1833 
1834 	channel = ctrlr->number;
1835 
1836 	if (!list_empty(&ctrlr->running)) {
1837 		/*
1838 		 * XXX We have an active DMA transfer and currently haven't
1839 		 *     sorted out the mechanism for "pending" DMA transfers.
1840 		 *     Return busy.
1841 		 */
1842 		/* Need to add to pending here */
1843 		mutex_unlock(&ctrlr->mtx);
1844 		return -EBUSY;
1845 	} else {
1846 		list_add(&list->list, &ctrlr->running);
1847 	}
1848 
1849 	/* Get first bus address and write into registers */
1850 	entry = list_first_entry(&list->entries, struct tsi148_dma_entry,
1851 		list);
1852 
1853 	mutex_unlock(&ctrlr->mtx);
1854 
1855 	reg_split(entry->dma_handle, &bus_addr_high, &bus_addr_low);
1856 
1857 	iowrite32be(bus_addr_high, bridge->base +
1858 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1859 	iowrite32be(bus_addr_low, bridge->base +
1860 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1861 
1862 	dctlreg = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1863 		TSI148_LCSR_OFFSET_DCTL);
1864 
1865 	/* Start the operation */
1866 	iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
1867 		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1868 
1869 	retval = wait_event_interruptible(bridge->dma_queue[channel],
1870 		tsi148_dma_busy(ctrlr->parent, channel));
1871 
1872 	if (retval) {
1873 		iowrite32be(dctlreg | TSI148_LCSR_DCTL_ABT, bridge->base +
1874 			TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1875 		/* Wait for the operation to abort */
1876 		wait_event(bridge->dma_queue[channel],
1877 			   tsi148_dma_busy(ctrlr->parent, channel));
1878 		retval = -EINTR;
1879 		goto exit;
1880 	}
1881 
1882 	/*
1883 	 * Read the status register; this register is valid until we kick off a
1884 	 * new transfer.
1885 	 */
1886 	val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
1887 		TSI148_LCSR_OFFSET_DSTA);
1888 
1889 	if (val & TSI148_LCSR_DSTA_VBE) {
1890 		dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
1891 		retval = -EIO;
1892 	}
1893 
1894 exit:
1895 	/* Remove list from running list */
1896 	mutex_lock(&ctrlr->mtx);
1897 	list_del(&list->list);
1898 	mutex_unlock(&ctrlr->mtx);
1899 
1900 	return retval;
1901 }
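
/*
 * Illustrative sketch only: after building a list as above, a consumer would
 * typically run and release it through the VME core, which ends up in
 * tsi148_dma_list_exec()/tsi148_dma_list_empty(). Names assumed from the
 * VME core API; error handling omitted.
 *
 *	if (vme_dma_list_exec(list))
 *		pr_err("DMA transfer failed\n");
 *	vme_dma_list_free(list);
 */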
1902 
1903 /*
1904  * Clean up a previously generated link list
1905  *
1906  * This is a separate function; don't assume that the chain can't be reused.
1907  */
1908 static int tsi148_dma_list_empty(struct vme_dma_list *list)
1909 {
1910 	struct list_head *pos, *temp;
1911 	struct tsi148_dma_entry *entry;
1912 
1913 	struct vme_bridge *tsi148_bridge = list->parent->parent;
1914 
1915 	/* detach and free each entry */
1916 	list_for_each_safe(pos, temp, &list->entries) {
1917 		list_del(pos);
1918 		entry = list_entry(pos, struct tsi148_dma_entry, list);
1919 
1920 		dma_unmap_single(tsi148_bridge->parent, entry->dma_handle,
1921 			sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
1922 		kfree(entry);
1923 	}
1924 
1925 	return 0;
1926 }
1927 
1928 /*
1929  * All 4 location monitors reside at the same base - this is therefore a
1930  * system wide configuration.
1931  *
1932  * This does not enable the LM monitor - that should be done when the first
1933  * callback is attached and disabled when the last callback is removed.
1934  */
1935 static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
1936 	u32 aspace, u32 cycle)
1937 {
1938 	u32 lm_base_high, lm_base_low, lm_ctl = 0;
1939 	int i;
1940 	struct vme_bridge *tsi148_bridge;
1941 	struct tsi148_driver *bridge;
1942 
1943 	tsi148_bridge = lm->parent;
1944 
1945 	bridge = tsi148_bridge->driver_priv;
1946 
1947 	mutex_lock(&lm->mtx);
1948 
1949 	/* If we already have a callback attached, we can't move it! */
1950 	for (i = 0; i < lm->monitors; i++) {
1951 		if (bridge->lm_callback[i] != NULL) {
1952 			mutex_unlock(&lm->mtx);
1953 			dev_err(tsi148_bridge->parent, "Location monitor "
1954 				"callback attached, can't reset\n");
1955 			return -EBUSY;
1956 		}
1957 	}
1958 
1959 	switch (aspace) {
1960 	case VME_A16:
1961 		lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
1962 		break;
1963 	case VME_A24:
1964 		lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
1965 		break;
1966 	case VME_A32:
1967 		lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
1968 		break;
1969 	case VME_A64:
1970 		lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
1971 		break;
1972 	default:
1973 		mutex_unlock(&lm->mtx);
1974 		dev_err(tsi148_bridge->parent, "Invalid address space\n");
1975 		return -EINVAL;
1977 	}
1978 
1979 	if (cycle & VME_SUPER)
1980 		lm_ctl |= TSI148_LCSR_LMAT_SUPR;
1981 	if (cycle & VME_USER)
1982 		lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
1983 	if (cycle & VME_PROG)
1984 		lm_ctl |= TSI148_LCSR_LMAT_PGM;
1985 	if (cycle & VME_DATA)
1986 		lm_ctl |= TSI148_LCSR_LMAT_DATA;
1987 
1988 	reg_split(lm_base, &lm_base_high, &lm_base_low);
1989 
1990 	iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
1991 	iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
1992 	iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
1993 
1994 	mutex_unlock(&lm->mtx);
1995 
1996 	return 0;
1997 }
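
/*
 * Illustrative sketch only: a consumer would normally configure the location
 * monitor through the VME core wrappers, e.g. (names assumed from the VME
 * core API; the base address is a placeholder):
 *
 *	struct vme_resource *lm_res = vme_lm_request(vdev);
 *
 *	vme_lm_set(lm_res, 0x6000, VME_A16, VME_USER | VME_DATA);
 */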
1998 
1999 /* Get the configuration of the location monitor and return whether it is
2000  * enabled or disabled.
2001  */
2002 static int tsi148_lm_get(struct vme_lm_resource *lm,
2003 	unsigned long long *lm_base, u32 *aspace, u32 *cycle)
2004 {
2005 	u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
2006 	struct tsi148_driver *bridge;
2007 
2008 	bridge = lm->parent->driver_priv;
2009 
2010 	mutex_lock(&lm->mtx);
2011 
2012 	lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
2013 	lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
2014 	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2015 
2016 	reg_join(lm_base_high, lm_base_low, lm_base);
2017 
2018 	if (lm_ctl & TSI148_LCSR_LMAT_EN)
2019 		enabled = 1;
2020 
2021 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
2022 		*aspace |= VME_A16;
2023 
2024 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
2025 		*aspace |= VME_A24;
2026 
2027 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
2028 		*aspace |= VME_A32;
2029 
2030 	if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
2031 		*aspace |= VME_A64;
2032 
2034 	if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2035 		*cycle |= VME_SUPER;
2036 	if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2037 		*cycle |= VME_USER;
2038 	if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2039 		*cycle |= VME_PROG;
2040 	if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2041 		*cycle |= VME_DATA;
2042 
2043 	mutex_unlock(&lm->mtx);
2044 
2045 	return enabled;
2046 }
2047 
2048 /*
2049  * Attach a callback to a specific location monitor.
2050  *
2051  * Callback will be passed the monitor triggered.
2052  */
2053 static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
2054 	void (*callback)(int))
2055 {
2056 	u32 lm_ctl, tmp;
2057 	struct vme_bridge *tsi148_bridge;
2058 	struct tsi148_driver *bridge;
2059 
2060 	tsi148_bridge = lm->parent;
2061 
2062 	bridge = tsi148_bridge->driver_priv;
2063 
2064 	mutex_lock(&lm->mtx);
2065 
2066 	/* Ensure that the location monitor is configured - need PGM or DATA */
2067 	lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2068 	if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2069 		mutex_unlock(&lm->mtx);
2070 		dev_err(tsi148_bridge->parent, "Location monitor not properly "
2071 			"configured\n");
2072 		return -EINVAL;
2073 	}
2074 
2075 	/* Check that a callback isn't already attached */
2076 	if (bridge->lm_callback[monitor] != NULL) {
2077 		mutex_unlock(&lm->mtx);
2078 		dev_err(tsi148_bridge->parent, "Existing callback attached\n");
2079 		return -EBUSY;
2080 	}
2081 
2082 	/* Attach callback */
2083 	bridge->lm_callback[monitor] = callback;
2084 
2085 	/* Enable Location Monitor interrupt */
2086 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2087 	tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2088 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
2089 
2090 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2091 	tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2092 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2093 
2094 	/* Ensure that the global Location Monitor Enable bit is set */
2095 	if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2096 		lm_ctl |= TSI148_LCSR_LMAT_EN;
2097 		iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
2098 	}
2099 
2100 	mutex_unlock(&lm->mtx);
2101 
2102 	return 0;
2103 }
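
/*
 * Illustrative sketch only: with the monitor configured, a callback of the
 * form used above can be attached to one of the four monitor slots (0-3).
 * "my_lm_handler" is a placeholder; it is called from the interrupt handler,
 * so it must not sleep.
 *
 *	static void my_lm_handler(int monitor)
 *	{
 *		pr_info("location monitor %d triggered\n", monitor);
 *	}
 *
 *	vme_lm_attach(lm_res, 0, my_lm_handler);
 */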
2104 
2105 /*
2106  * Detach a callback function from a specific location monitor.
2107  */
2108 static int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
2109 {
2110 	u32 lm_en, tmp;
2111 	struct tsi148_driver *bridge;
2112 
2113 	bridge = lm->parent->driver_priv;
2114 
2115 	mutex_lock(&lm->mtx);
2116 
2117 	/* Disable Location Monitor and ensure previous interrupts are clear */
2118 	lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
2119 	lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2120 	iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
2121 
2122 	tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
2123 	tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2124 	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
2125 
2126 	iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2127 		 bridge->base + TSI148_LCSR_INTC);
2128 
2129 	/* Detach callback */
2130 	bridge->lm_callback[monitor] = NULL;
2131 
2132 	/* If all monitors are disabled, disable the global Location Monitor */
2133 	if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2134 			TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2135 		tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
2136 		tmp &= ~TSI148_LCSR_LMAT_EN;
2137 		iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
2138 	}
2139 
2140 	mutex_unlock(&lm->mtx);
2141 
2142 	return 0;
2143 }
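
/*
 * Illustrative sketch only: the matching teardown through the VME core
 * (names assumed), once the callback is no longer needed:
 *
 *	vme_lm_detach(lm_res, 0);
 *	vme_lm_free(lm_res);
 */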
2144 
2145 /*
2146  * Determine Geographical Addressing
2147  */
2148 static int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
2149 {
2150 	u32 slot = 0;
2151 	struct tsi148_driver *bridge;
2152 
2153 	bridge = tsi148_bridge->driver_priv;
2154 
2155 	if (!geoid) {
2156 		slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
2157 		slot = slot & TSI148_LCSR_VSTAT_GA_M;
2158 	} else
2159 		slot = geoid;
2160 
2161 	return (int)slot;
2162 }
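
/*
 * Note: when the "geoid" module parameter is 0 (the default) the slot number
 * is read from the geographical address field of the VSTAT register, i.e.
 * the backplane's geographical address pins; a non-zero "geoid" simply
 * overrides that value.
 */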
2163 
2164 static void *tsi148_alloc_consistent(struct device *parent, size_t size,
2165 	dma_addr_t *dma)
2166 {
2167 	struct pci_dev *pdev;
2168 
2169 	/* Find pci_dev container of dev */
2170 	pdev = to_pci_dev(parent);
2171 
2172 	return pci_alloc_consistent(pdev, size, dma);
2173 }
2174 
2175 static void tsi148_free_consistent(struct device *parent, size_t size,
2176 	void *vaddr, dma_addr_t dma)
2177 {
2178 	struct pci_dev *pdev;
2179 
2180 	/* Find pci_dev container of dev */
2181 	pdev = to_pci_dev(parent);
2182 
2183 	pci_free_consistent(pdev, size, vaddr, dma);
2184 }
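
/*
 * Note: these two helpers are the hooks the VME core is expected to call for
 * vme_alloc_consistent()/vme_free_consistent(); they simply translate the
 * generic struct device into the owning PCI device so the legacy
 * pci_*_consistent() interface can be used.
 */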
2185 
2186 /*
2187  * Configure CR/CSR space
2188  *
2189  * Access to the CR/CSR can be configured at power-up. The location of the
2190  * CR/CSR registers in the CR/CSR address space is determined by the board's
2191  * Auto-ID or geographic address. This function ensures that the window is
2192  * enabled at an offset consistent with the board's geographic address.
2193  *
2194  * Each board has a 512kB window, with the highest 4kB used for the board's
2195  * registers; this leaves a fixed-length 508kB window which must be mapped
2196  * onto PCI memory.
2197  */
2198 static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
2199 	struct pci_dev *pdev)
2200 {
2201 	u32 cbar, crat, vstat;
2202 	u32 crcsr_bus_high, crcsr_bus_low;
2203 	int retval;
2204 	struct tsi148_driver *bridge;
2205 
2206 	bridge = tsi148_bridge->driver_priv;
2207 
2208 	/* Allocate mem for CR/CSR image */
2209 	bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2210 						     &bridge->crcsr_bus);
2211 	if (bridge->crcsr_kernel == NULL) {
2212 		dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
2213 			"CR/CSR image\n");
2214 		return -ENOMEM;
2215 	}
2216 
2217 	reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2218 
2219 	iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
2220 	iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
2221 
2222 	/* Ensure that the CR/CSR is configured at the correct offset */
2223 	cbar = ioread32be(bridge->base + TSI148_CBAR);
2224 	cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2225 
2226 	vstat = tsi148_slot_get(tsi148_bridge);
2227 
2228 	if (cbar != vstat) {
2229 		cbar = vstat;
2230 		dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
2231 		iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
2232 	}
2233 	dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
2234 
2235 	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2236 	if (crat & TSI148_LCSR_CRAT_EN)
2237 		dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
2238 	else {
2239 		dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
2240 		iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2241 			bridge->base + TSI148_LCSR_CRAT);
2242 	}
2243 
2244 	/* If we want flushed, error-checked writes, set up a window
2245 	 * over the CR/CSR registers. We read from here to safely flush
2246 	 * through VME writes.
2247 	 */
2248 	if (err_chk) {
2249 		retval = tsi148_master_set(bridge->flush_image, 1,
2250 			(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
2251 			VME_D16);
2252 		if (retval)
2253 			dev_err(tsi148_bridge->parent, "Configuring flush image"
2254 				" failed\n");
2255 	}
2256 
2257 	return 0;
2258 
2259 }
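
/*
 * Worked example (informational): the CR/CSR offset programmed above is the
 * slot number at a 512kB granularity. For a board in slot 5:
 *
 *	offset = slot * 512kB = 5 * 0x80000 = 0x280000
 *
 * which is also the base used for the optional err_chk flush window.
 */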
2260 
2261 static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
2262 	struct pci_dev *pdev)
2263 {
2264 	u32 crat;
2265 	struct tsi148_driver *bridge;
2266 
2267 	bridge = tsi148_bridge->driver_priv;
2268 
2269 	/* Turn off CR/CSR space */
2270 	crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
2271 	iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2272 		bridge->base + TSI148_LCSR_CRAT);
2273 
2274 	/* Free image */
2275 	iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
2276 	iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
2277 
2278 	pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
2279 		bridge->crcsr_bus);
2280 }
2281 
2282 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2283 {
2284 	int retval, i, master_num;
2285 	u32 data;
2286 	struct list_head *pos = NULL, *n;
2287 	struct vme_bridge *tsi148_bridge;
2288 	struct tsi148_driver *tsi148_device;
2289 	struct vme_master_resource *master_image;
2290 	struct vme_slave_resource *slave_image;
2291 	struct vme_dma_resource *dma_ctrlr;
2292 	struct vme_lm_resource *lm;
2293 
2294 	/* If we want to support more than one of each bridge, we need to
2295 	 * dynamically generate this so we get one per device
2296 	 */
2297 	tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
2298 	if (tsi148_bridge == NULL) {
2299 		dev_err(&pdev->dev, "Failed to allocate memory for device "
2300 			"structure\n");
2301 		retval = -ENOMEM;
2302 		goto err_struct;
2303 	}
2304 
2305 	tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
2306 	if (tsi148_device == NULL) {
2307 		dev_err(&pdev->dev, "Failed to allocate memory for device "
2308 			"structure\n");
2309 		retval = -ENOMEM;
2310 		goto err_driver;
2311 	}
2312 
2313 	tsi148_bridge->driver_priv = tsi148_device;
2314 
2315 	/* Enable the device */
2316 	retval = pci_enable_device(pdev);
2317 	if (retval) {
2318 		dev_err(&pdev->dev, "Unable to enable device\n");
2319 		goto err_enable;
2320 	}
2321 
2322 	/* Map Registers */
2323 	retval = pci_request_regions(pdev, driver_name);
2324 	if (retval) {
2325 		dev_err(&pdev->dev, "Unable to reserve resources\n");
2326 		goto err_resource;
2327 	}
2328 
2329 	/* map registers in BAR 0 */
2330 	tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
2331 		4096);
2332 	if (!tsi148_device->base) {
2333 		dev_err(&pdev->dev, "Unable to remap CRG region\n");
2334 		retval = -EIO;
2335 		goto err_remap;
2336 	}
2337 
2338 	/* Check to see if the mapping worked out */
2339 	data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
2340 	if (data != PCI_VENDOR_ID_TUNDRA) {
2341 		dev_err(&pdev->dev, "CRG region check failed\n");
2342 		retval = -EIO;
2343 		goto err_test;
2344 	}
2345 
2346 	/* Initialize wait queues & mutual exclusion flags */
2347 	init_waitqueue_head(&tsi148_device->dma_queue[0]);
2348 	init_waitqueue_head(&tsi148_device->dma_queue[1]);
2349 	init_waitqueue_head(&tsi148_device->iack_queue);
2350 	mutex_init(&tsi148_device->vme_int);
2351 	mutex_init(&tsi148_device->vme_rmw);
2352 
2353 	tsi148_bridge->parent = &pdev->dev;
2354 	strcpy(tsi148_bridge->name, driver_name);
2355 
2356 	/* Setup IRQ */
2357 	retval = tsi148_irq_init(tsi148_bridge);
2358 	if (retval != 0) {
2359 		dev_err(&pdev->dev, "Chip Initialization failed.\n");
2360 		goto err_irq;
2361 	}
2362 
2363 	/* If we are going to flush writes, we need to read from the VME bus.
2364 	 * We need to do this safely, thus we read the device's own CR/CSR
2365 	 * register. To do this we must set up a window in CR/CSR space and
2366 	 * hence have one less master window resource available.
2367 	 */
2368 	master_num = TSI148_MAX_MASTER;
2369 	if (err_chk) {
2370 		master_num--;
2371 
2372 		tsi148_device->flush_image =
2373 			kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
2374 		if (tsi148_device->flush_image == NULL) {
2375 			dev_err(&pdev->dev, "Failed to allocate memory for "
2376 			"flush resource structure\n");
2377 			retval = -ENOMEM;
2378 			goto err_master;
2379 		}
2380 		tsi148_device->flush_image->parent = tsi148_bridge;
2381 		spin_lock_init(&tsi148_device->flush_image->lock);
2382 		tsi148_device->flush_image->locked = 1;
2383 		tsi148_device->flush_image->number = master_num;
2384 		memset(&tsi148_device->flush_image->bus_resource, 0,
2385 			sizeof(struct resource));
2386 		tsi148_device->flush_image->kern_base = NULL;
2387 	}
2388 
2389 	/* Add master windows to list */
2390 	INIT_LIST_HEAD(&tsi148_bridge->master_resources);
2391 	for (i = 0; i < master_num; i++) {
2392 		master_image = kmalloc(sizeof(struct vme_master_resource),
2393 			GFP_KERNEL);
2394 		if (master_image == NULL) {
2395 			dev_err(&pdev->dev, "Failed to allocate memory for "
2396 			"master resource structure\n");
2397 			retval = -ENOMEM;
2398 			goto err_master;
2399 		}
2400 		master_image->parent = tsi148_bridge;
2401 		spin_lock_init(&master_image->lock);
2402 		master_image->locked = 0;
2403 		master_image->number = i;
2404 		master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2405 			VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2406 			VME_USER3 | VME_USER4;
2407 		master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2408 			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2409 			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2410 			VME_PROG | VME_DATA;
2411 		master_image->width_attr = VME_D16 | VME_D32;
2412 		memset(&master_image->bus_resource, 0,
2413 			sizeof(struct resource));
2414 		master_image->kern_base = NULL;
2415 		list_add_tail(&master_image->list,
2416 			&tsi148_bridge->master_resources);
2417 	}
2418 
2419 	/* Add slave windows to list */
2420 	INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
2421 	for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2422 		slave_image = kmalloc(sizeof(struct vme_slave_resource),
2423 			GFP_KERNEL);
2424 		if (slave_image == NULL) {
2425 			dev_err(&pdev->dev, "Failed to allocate memory for "
2426 			"slave resource structure\n");
2427 			retval = -ENOMEM;
2428 			goto err_slave;
2429 		}
2430 		slave_image->parent = tsi148_bridge;
2431 		mutex_init(&slave_image->mtx);
2432 		slave_image->locked = 0;
2433 		slave_image->number = i;
2434 		slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2435 			VME_A64;
2436 		slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2437 			VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2438 			VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2439 			VME_PROG | VME_DATA;
2440 		list_add_tail(&slave_image->list,
2441 			&tsi148_bridge->slave_resources);
2442 	}
2443 
2444 	/* Add dma engines to list */
2445 	INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
2446 	for (i = 0; i < TSI148_MAX_DMA; i++) {
2447 		dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
2448 			GFP_KERNEL);
2449 		if (dma_ctrlr == NULL) {
2450 			dev_err(&pdev->dev, "Failed to allocate memory for "
2451 			"dma resource structure\n");
2452 			retval = -ENOMEM;
2453 			goto err_dma;
2454 		}
2455 		dma_ctrlr->parent = tsi148_bridge;
2456 		mutex_init(&dma_ctrlr->mtx);
2457 		dma_ctrlr->locked = 0;
2458 		dma_ctrlr->number = i;
2459 		dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
2460 			VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
2461 			VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
2462 			VME_DMA_PATTERN_TO_MEM;
2463 		INIT_LIST_HEAD(&dma_ctrlr->pending);
2464 		INIT_LIST_HEAD(&dma_ctrlr->running);
2465 		list_add_tail(&dma_ctrlr->list,
2466 			&tsi148_bridge->dma_resources);
2467 	}
2468 
2469 	/* Add location monitor to list */
2470 	INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
2471 	lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
2472 	if (lm == NULL) {
2473 		dev_err(&pdev->dev, "Failed to allocate memory for "
2474 		"location monitor resource structure\n");
2475 		retval = -ENOMEM;
2476 		goto err_lm;
2477 	}
2478 	lm->parent = tsi148_bridge;
2479 	mutex_init(&lm->mtx);
2480 	lm->locked = 0;
2481 	lm->number = 1;
2482 	lm->monitors = 4;
2483 	list_add_tail(&lm->list, &tsi148_bridge->lm_resources);
2484 
2485 	tsi148_bridge->slave_get = tsi148_slave_get;
2486 	tsi148_bridge->slave_set = tsi148_slave_set;
2487 	tsi148_bridge->master_get = tsi148_master_get;
2488 	tsi148_bridge->master_set = tsi148_master_set;
2489 	tsi148_bridge->master_read = tsi148_master_read;
2490 	tsi148_bridge->master_write = tsi148_master_write;
2491 	tsi148_bridge->master_rmw = tsi148_master_rmw;
2492 	tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2493 	tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2494 	tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2495 	tsi148_bridge->irq_set = tsi148_irq_set;
2496 	tsi148_bridge->irq_generate = tsi148_irq_generate;
2497 	tsi148_bridge->lm_set = tsi148_lm_set;
2498 	tsi148_bridge->lm_get = tsi148_lm_get;
2499 	tsi148_bridge->lm_attach = tsi148_lm_attach;
2500 	tsi148_bridge->lm_detach = tsi148_lm_detach;
2501 	tsi148_bridge->slot_get = tsi148_slot_get;
2502 	tsi148_bridge->alloc_consistent = tsi148_alloc_consistent;
2503 	tsi148_bridge->free_consistent = tsi148_free_consistent;
2504 
2505 	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2506 	dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2507 		(data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
2508 	if (!geoid)
2509 		dev_info(&pdev->dev, "VME geographical address is %d\n",
2510 			data & TSI148_LCSR_VSTAT_GA_M);
2511 	else
2512 		dev_info(&pdev->dev, "VME geographical address is set to %d\n",
2513 			geoid);
2514 
2515 	dev_info(&pdev->dev, "VME write flush and error check is %s\n",
2516 		err_chk ? "enabled" : "disabled");
2517 
2518 	retval = tsi148_crcsr_init(tsi148_bridge, pdev);
2519 	if (retval) {
2520 		dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2521 		goto err_crcsr;
2522 	}
2523 
2524 	retval = vme_register_bridge(tsi148_bridge);
2525 	if (retval != 0) {
2526 		dev_err(&pdev->dev, "Chip Registration failed.\n");
2527 		goto err_reg;
2528 	}
2529 
2530 	pci_set_drvdata(pdev, tsi148_bridge);
2531 
2532 	/* Clear VME bus "board fail", and "power-up reset" lines */
2533 	data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
2534 	data &= ~TSI148_LCSR_VSTAT_BRDFL;
2535 	data |= TSI148_LCSR_VSTAT_CPURST;
2536 	iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
2537 
2538 	return 0;
2539 
2540 err_reg:
2541 	tsi148_crcsr_exit(tsi148_bridge, pdev);
2542 err_crcsr:
2543 err_lm:
2544 	/* resources are stored in a linked list */
2545 	list_for_each_safe(pos, n, &tsi148_bridge->lm_resources) {
2546 		lm = list_entry(pos, struct vme_lm_resource, list);
2547 		list_del(pos);
2548 		kfree(lm);
2549 	}
2550 err_dma:
2551 	/* resources are stored in a linked list */
2552 	list_for_each_safe(pos, n, &tsi148_bridge->dma_resources) {
2553 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2554 		list_del(pos);
2555 		kfree(dma_ctrlr);
2556 	}
2557 err_slave:
2558 	/* resources are stored in a linked list */
2559 	list_for_each_safe(pos, n, &tsi148_bridge->slave_resources) {
2560 		slave_image = list_entry(pos, struct vme_slave_resource, list);
2561 		list_del(pos);
2562 		kfree(slave_image);
2563 	}
2564 err_master:
2565 	/* resources are stored in a linked list */
2566 	list_for_each_safe(pos, n, &tsi148_bridge->master_resources) {
2567 		master_image = list_entry(pos, struct vme_master_resource,
2568 			list);
2569 		list_del(pos);
2570 		kfree(master_image);
2571 	}
2572 
2573 	tsi148_irq_exit(tsi148_bridge, pdev);
2574 err_irq:
2575 err_test:
2576 	iounmap(tsi148_device->base);
2577 err_remap:
2578 	pci_release_regions(pdev);
2579 err_resource:
2580 	pci_disable_device(pdev);
2581 err_enable:
2582 	kfree(tsi148_device);
2583 err_driver:
2584 	kfree(tsi148_bridge);
2585 err_struct:
2586 	return retval;
2587 
2588 }
2589 
2590 static void tsi148_remove(struct pci_dev *pdev)
2591 {
2592 	struct list_head *pos = NULL;
2593 	struct list_head *tmplist;
2594 	struct vme_master_resource *master_image;
2595 	struct vme_slave_resource *slave_image;
2596 	struct vme_dma_resource *dma_ctrlr;
2597 	int i;
2598 	struct tsi148_driver *bridge;
2599 	struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
2600 
2601 	bridge = tsi148_bridge->driver_priv;
2602 
2604 	dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2605 
2606 	/*
2607 	 *  Shutdown all inbound and outbound windows.
2608 	 */
2609 	for (i = 0; i < 8; i++) {
2610 		iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
2611 			TSI148_LCSR_OFFSET_ITAT);
2612 		iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
2613 			TSI148_LCSR_OFFSET_OTAT);
2614 	}
2615 
2616 	/*
2617 	 *  Shutdown Location monitor.
2618 	 */
2619 	iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
2620 
2621 	/*
2622 	 *  Shutdown CRG map.
2623 	 */
2624 	iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
2625 
2626 	/*
2627 	 *  Clear error status.
2628 	 */
2629 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
2630 	iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
2631 	iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
2632 
2633 	/*
2634 	 *  Remove VIRQ interrupt (if any)
2635 	 */
2636 	if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
2637 		iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
2638 
2639 	/*
2640 	 *  Map all Interrupts to PCI INTA
2641 	 */
2642 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
2643 	iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
2644 
2645 	tsi148_irq_exit(tsi148_bridge, pdev);
2646 
2647 	vme_unregister_bridge(tsi148_bridge);
2648 
2649 	tsi148_crcsr_exit(tsi148_bridge, pdev);
2650 
2651 	/* resources are stored in a linked list */
2652 	list_for_each_safe(pos, tmplist, &tsi148_bridge->dma_resources) {
2653 		dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2654 		list_del(pos);
2655 		kfree(dma_ctrlr);
2656 	}
2657 
2658 	/* resources are stored in a linked list */
2659 	list_for_each_safe(pos, tmplist, &tsi148_bridge->slave_resources) {
2660 		slave_image = list_entry(pos, struct vme_slave_resource, list);
2661 		list_del(pos);
2662 		kfree(slave_image);
2663 	}
2664 
2665 	/* resources are stored in a linked list */
2666 	list_for_each_safe(pos, tmplist, &tsi148_bridge->master_resources) {
2667 		master_image = list_entry(pos, struct vme_master_resource,
2668 			list);
2669 		list_del(pos);
2670 		kfree(master_image);
2671 	}
2672 
2673 	iounmap(bridge->base);
2674 
2675 	pci_release_regions(pdev);
2676 
2677 	pci_disable_device(pdev);
2678 
2679 	kfree(tsi148_bridge->driver_priv);
2680 
2681 	kfree(tsi148_bridge);
2682 }
2683 
2684 module_pci_driver(tsi148_driver);
2685 
2686 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2687 module_param(err_chk, bool, 0);
2688 
2689 MODULE_PARM_DESC(geoid, "Override geographical addressing");
2690 module_param(geoid, int, 0);
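
/*
 * Usage note (informational): both parameters are load-time options, e.g.
 * (assuming the module is built as vme_tsi148):
 *
 *	modprobe vme_tsi148 err_chk=1 geoid=3
 *
 * err_chk trades throughput for flushed, error-checked VME writes and
 * consumes one master window for the internal flush image; geoid overrides
 * the slot number read from the backplane.
 */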
2691 
2692 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2693 MODULE_LICENSE("GPL");
2694