1/*
2 * Driver for PLX NET2272 USB device controller
3 *
4 * Copyright (C) 2005-2006 PLX Technology, Inc.
5 * Copyright (C) 2006-2011 Analog Devices, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
20 */
21
22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/errno.h>
25#include <linux/gpio.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/ioport.h>
30#include <linux/kernel.h>
31#include <linux/list.h>
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/pci.h>
35#include <linux/platform_device.h>
36#include <linux/prefetch.h>
37#include <linux/sched.h>
38#include <linux/slab.h>
39#include <linux/timer.h>
40#include <linux/usb.h>
41#include <linux/usb/ch9.h>
42#include <linux/usb/gadget.h>
43
44#include <asm/byteorder.h>
45#include <asm/unaligned.h>
46
47#include "net2272.h"
48
49#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
50
51static const char driver_name[] = "net2272";
52static const char driver_vers[] = "2006 October 17/mainline";
53static const char driver_desc[] = DRIVER_DESC;
54
55static const char ep0name[] = "ep0";
56static const char * const ep_name[] = {
57	ep0name,
58	"ep-a", "ep-b", "ep-c",
59};
60
61#ifdef CONFIG_USB_NET2272_DMA
62/*
63 * use_dma: the NET2272 can use an external DMA controller.
64 * Note that since there is no generic DMA api, some functions,
65 * notably request_dma, start_dma, and cancel_dma will need to be
66 * modified for your platform's particular dma controller.
67 *
68 * If use_dma is disabled, pio will be used instead.
69 */
70static bool use_dma = 0;
71module_param(use_dma, bool, 0644);
72
73/*
74 * dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
75 * The NET2272 can only use dma for a single endpoint at a time.
76 * At some point this could be modified to allow either endpoint
77 * to take control of dma as it becomes available.
78 *
79 * Note that DMA should not be used on OUT endpoints unless it can
80 * be guaranteed that no short packets will arrive on an IN endpoint
81 * while the DMA operation is pending.  Otherwise the OUT DMA will
82 * terminate prematurely (See NET2272 Errata 630-0213-0101)
83 */
84static ushort dma_ep = 1;
85module_param(dma_ep, ushort, 0644);
86
87/*
88 * dma_mode: net2272 dma mode setting (see LOCCTL1 definiton):
89 *	mode 0 == Slow DREQ mode
90 *	mode 1 == Fast DREQ mode
91 *	mode 2 == Burst mode
92 */
93static ushort dma_mode = 2;
94module_param(dma_mode, ushort, 0644);
95#else
96#define use_dma 0
97#define dma_ep 1
98#define dma_mode 2
99#endif
100
101/*
102 * fifo_mode: net2272 buffer configuration:
103 *      mode 0 == ep-{a,b,c} 512db each
104 *      mode 1 == ep-a 1k, ep-{b,c} 512db
105 *      mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
106 *      mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
107 */
108static ushort fifo_mode = 0;
109module_param(fifo_mode, ushort, 0644);
110
111/*
112 * enable_suspend: When enabled, the driver will respond to
113 * USB suspend requests by powering down the NET2272.  Otherwise,
114 * USB suspend requests will be ignored.  This is acceptible for
115 * self-powered devices.  For bus powered devices set this to 1.
116 */
117static ushort enable_suspend = 0;
118module_param(enable_suspend, ushort, 0644);
119
120static void assert_out_naking(struct net2272_ep *ep, const char *where)
121{
122	u8 tmp;
123
124#ifndef DEBUG
125	return;
126#endif
127
128	tmp = net2272_ep_read(ep, EP_STAT0);
129	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
130		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
131			ep->ep.name, where, tmp);
132		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
133	}
134}
135#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
136
137static void stop_out_naking(struct net2272_ep *ep)
138{
139	u8 tmp = net2272_ep_read(ep, EP_STAT0);
140
141	if ((tmp & (1 << NAK_OUT_PACKETS)) != 0)
142		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
143}
144
145#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
146
147static char *type_string(u8 bmAttributes)
148{
149	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
150	case USB_ENDPOINT_XFER_BULK: return "bulk";
151	case USB_ENDPOINT_XFER_ISOC: return "iso";
152	case USB_ENDPOINT_XFER_INT:  return "intr";
153	default:                     return "control";
154	}
155}
156
157static char *buf_state_string(unsigned state)
158{
159	switch (state) {
160	case BUFF_FREE:  return "free";
161	case BUFF_VALID: return "valid";
162	case BUFF_LCL:   return "local";
163	case BUFF_USB:   return "usb";
164	default:         return "unknown";
165	}
166}
167
168static char *dma_mode_string(void)
169{
170	if (!use_dma)
171		return "PIO";
172	switch (dma_mode) {
173	case 0:  return "SLOW DREQ";
174	case 1:  return "FAST DREQ";
175	case 2:  return "BURST";
176	default: return "invalid";
177	}
178}
179
180static void net2272_dequeue_all(struct net2272_ep *);
181static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
182static int net2272_fifo_status(struct usb_ep *);
183
184static struct usb_ep_ops net2272_ep_ops;
185
186/*---------------------------------------------------------------------------*/
187
188static int
189net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
190{
191	struct net2272 *dev;
192	struct net2272_ep *ep;
193	u32 max;
194	u8 tmp;
195	unsigned long flags;
196
197	ep = container_of(_ep, struct net2272_ep, ep);
198	if (!_ep || !desc || ep->desc || _ep->name == ep0name
199			|| desc->bDescriptorType != USB_DT_ENDPOINT)
200		return -EINVAL;
201	dev = ep->dev;
202	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
203		return -ESHUTDOWN;
204
205	max = usb_endpoint_maxp(desc) & 0x1fff;
206
207	spin_lock_irqsave(&dev->lock, flags);
208	_ep->maxpacket = max & 0x7fff;
209	ep->desc = desc;
210
211	/* net2272_ep_reset() has already been called */
212	ep->stopped = 0;
213	ep->wedged = 0;
214
215	/* set speed-dependent max packet */
216	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
217	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);
218
219	/* set type, direction, address; reset fifo counters */
220	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
221	tmp = usb_endpoint_type(desc);
222	if (usb_endpoint_xfer_bulk(desc)) {
223		/* catch some particularly blatant driver bugs */
224		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
225		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
226			spin_unlock_irqrestore(&dev->lock, flags);
227			return -ERANGE;
228		}
229	}
230	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
231	tmp <<= ENDPOINT_TYPE;
232	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
233	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
234	tmp |= (1 << ENDPOINT_ENABLE);
235
236	/* for OUT transfers, block the rx fifo until a read is posted */
237	ep->is_in = usb_endpoint_dir_in(desc);
238	if (!ep->is_in)
239		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
240
241	net2272_ep_write(ep, EP_CFG, tmp);
242
243	/* enable irqs */
244	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
245	net2272_write(dev, IRQENB0, tmp);
246
247	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
248		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
249		| net2272_ep_read(ep, EP_IRQENB);
250	net2272_ep_write(ep, EP_IRQENB, tmp);
251
252	tmp = desc->bEndpointAddress;
253	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
254		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
255		type_string(desc->bmAttributes), max,
256		net2272_ep_read(ep, EP_CFG));
257
258	spin_unlock_irqrestore(&dev->lock, flags);
259	return 0;
260}
261
262static void net2272_ep_reset(struct net2272_ep *ep)
263{
264	u8 tmp;
265
266	ep->desc = NULL;
267	INIT_LIST_HEAD(&ep->queue);
268
269	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
270	ep->ep.ops = &net2272_ep_ops;
271
272	/* disable irqs, endpoint */
273	net2272_ep_write(ep, EP_IRQENB, 0);
274
275	/* init to our chosen defaults, notably so that we NAK OUT
276	 * packets until the driver queues a read.
277	 */
278	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
279	net2272_ep_write(ep, EP_RSPSET, tmp);
280
281	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
282	if (ep->num != 0)
283		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);
284
285	net2272_ep_write(ep, EP_RSPCLR, tmp);
286
287	/* scrub most status bits, and flush any fifo state */
288	net2272_ep_write(ep, EP_STAT0,
289			  (1 << DATA_IN_TOKEN_INTERRUPT)
290			| (1 << DATA_OUT_TOKEN_INTERRUPT)
291			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
292			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
293			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
294
295	net2272_ep_write(ep, EP_STAT1,
296			    (1 << TIMEOUT)
297			  | (1 << USB_OUT_ACK_SENT)
298			  | (1 << USB_OUT_NAK_SENT)
299			  | (1 << USB_IN_ACK_RCVD)
300			  | (1 << USB_IN_NAK_SENT)
301			  | (1 << USB_STALL_SENT)
302			  | (1 << LOCAL_OUT_ZLP)
303			  | (1 << BUFFER_FLUSH));
304
305	/* fifo size is handled seperately */
306}
307
308static int net2272_disable(struct usb_ep *_ep)
309{
310	struct net2272_ep *ep;
311	unsigned long flags;
312
313	ep = container_of(_ep, struct net2272_ep, ep);
314	if (!_ep || !ep->desc || _ep->name == ep0name)
315		return -EINVAL;
316
317	spin_lock_irqsave(&ep->dev->lock, flags);
318	net2272_dequeue_all(ep);
319	net2272_ep_reset(ep);
320
321	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);
322
323	spin_unlock_irqrestore(&ep->dev->lock, flags);
324	return 0;
325}
326
327/*---------------------------------------------------------------------------*/
328
329static struct usb_request *
330net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
331{
332	struct net2272_ep *ep;
333	struct net2272_request *req;
334
335	if (!_ep)
336		return NULL;
337	ep = container_of(_ep, struct net2272_ep, ep);
338
339	req = kzalloc(sizeof(*req), gfp_flags);
340	if (!req)
341		return NULL;
342
343	INIT_LIST_HEAD(&req->queue);
344
345	return &req->req;
346}
347
348static void
349net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
350{
351	struct net2272_ep *ep;
352	struct net2272_request *req;
353
354	ep = container_of(_ep, struct net2272_ep, ep);
355	if (!_ep || !_req)
356		return;
357
358	req = container_of(_req, struct net2272_request, req);
359	WARN_ON(!list_empty(&req->queue));
360	kfree(req);
361}
362
363static void
364net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
365{
366	struct net2272 *dev;
367	unsigned stopped = ep->stopped;
368
369	if (ep->num == 0) {
370		if (ep->dev->protocol_stall) {
371			ep->stopped = 1;
372			set_halt(ep);
373		}
374		allow_status(ep);
375	}
376
377	list_del_init(&req->queue);
378
379	if (req->req.status == -EINPROGRESS)
380		req->req.status = status;
381	else
382		status = req->req.status;
383
384	dev = ep->dev;
385	if (use_dma && ep->dma)
386		usb_gadget_unmap_request(&dev->gadget, &req->req,
387				ep->is_in);
388
389	if (status && status != -ESHUTDOWN)
390		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
391			ep->ep.name, &req->req, status,
392			req->req.actual, req->req.length, req->req.buf);
393
394	/* don't modify queue heads during completion callback */
395	ep->stopped = 1;
396	spin_unlock(&dev->lock);
397	usb_gadget_giveback_request(&ep->ep, &req->req);
398	spin_lock(&dev->lock);
399	ep->stopped = stopped;
400}
401
402static int
403net2272_write_packet(struct net2272_ep *ep, u8 *buf,
404	struct net2272_request *req, unsigned max)
405{
406	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
407	u16 *bufp;
408	unsigned length, count;
409	u8 tmp;
410
411	length = min(req->req.length - req->req.actual, max);
412	req->req.actual += length;
413
414	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
415		ep->ep.name, req, max, length,
416		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
417
418	count = length;
419	bufp = (u16 *)buf;
420
421	while (likely(count >= 2)) {
422		/* no byte-swap required; chip endian set during init */
423		writew(*bufp++, ep_data);
424		count -= 2;
425	}
426	buf = (u8 *)bufp;
427
428	/* write final byte by placing the NET2272 into 8-bit mode */
429	if (unlikely(count)) {
430		tmp = net2272_read(ep->dev, LOCCTL);
431		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
432		writeb(*buf, ep_data);
433		net2272_write(ep->dev, LOCCTL, tmp);
434	}
435	return length;
436}
437
438/* returns: 0: still running, 1: completed, negative: errno */
439static int
440net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
441{
442	u8 *buf;
443	unsigned count, max;
444	int status;
445
446	dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
447		ep->ep.name, req->req.actual, req->req.length);
448
449	/*
450	 * Keep loading the endpoint until the final packet is loaded,
451	 * or the endpoint buffer is full.
452	 */
453 top:
454	/*
455	 * Clear interrupt status
456	 *  - Packet Transmitted interrupt will become set again when the
457	 *    host successfully takes another packet
458	 */
459	net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
460	while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
461		buf = req->req.buf + req->req.actual;
462		prefetch(buf);
463
464		/* force pagesel */
465		net2272_ep_read(ep, EP_STAT0);
466
467		max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
468			(net2272_ep_read(ep, EP_AVAIL0));
469
470		if (max < ep->ep.maxpacket)
471			max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
472				| (net2272_ep_read(ep, EP_AVAIL0));
473
474		count = net2272_write_packet(ep, buf, req, max);
475		/* see if we are done */
476		if (req->req.length == req->req.actual) {
477			/* validate short or zlp packet */
478			if (count < ep->ep.maxpacket)
479				set_fifo_bytecount(ep, 0);
480			net2272_done(ep, req, 0);
481
482			if (!list_empty(&ep->queue)) {
483				req = list_entry(ep->queue.next,
484						struct net2272_request,
485						queue);
486				status = net2272_kick_dma(ep, req);
487
488				if (status < 0)
489					if ((net2272_ep_read(ep, EP_STAT0)
490							& (1 << BUFFER_EMPTY)))
491						goto top;
492			}
493			return 1;
494		}
495		net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
496	}
497	return 0;
498}
499
500static void
501net2272_out_flush(struct net2272_ep *ep)
502{
503	ASSERT_OUT_NAKING(ep);
504
505	net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
506			| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
507	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
508}
509
510static int
511net2272_read_packet(struct net2272_ep *ep, u8 *buf,
512	struct net2272_request *req, unsigned avail)
513{
514	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
515	unsigned is_short;
516	u16 *bufp;
517
518	req->req.actual += avail;
519
520	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
521		ep->ep.name, req, avail,
522		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));
523
524	is_short = (avail < ep->ep.maxpacket);
525
526	if (unlikely(avail == 0)) {
527		/* remove any zlp from the buffer */
528		(void)readw(ep_data);
529		return is_short;
530	}
531
532	/* Ensure we get the final byte */
533	if (unlikely(avail % 2))
534		avail++;
535	bufp = (u16 *)buf;
536
537	do {
538		*bufp++ = readw(ep_data);
539		avail -= 2;
540	} while (avail);
541
542	/*
543	 * To avoid false endpoint available race condition must read
544	 * ep stat0 twice in the case of a short transfer
545	 */
546	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
547		net2272_ep_read(ep, EP_STAT0);
548
549	return is_short;
550}
551
552static int
553net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
554{
555	u8 *buf;
556	unsigned is_short;
557	int count;
558	int tmp;
559	int cleanup = 0;
560	int status = -1;
561
562	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
563		ep->ep.name, req->req.actual, req->req.length);
564
565 top:
566	do {
567		buf = req->req.buf + req->req.actual;
568		prefetchw(buf);
569
570		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
571			| net2272_ep_read(ep, EP_AVAIL0);
572
573		net2272_ep_write(ep, EP_STAT0,
574			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
575			(1 << DATA_PACKET_RECEIVED_INTERRUPT));
576
577		tmp = req->req.length - req->req.actual;
578
579		if (count > tmp) {
580			if ((tmp % ep->ep.maxpacket) != 0) {
581				dev_err(ep->dev->dev,
582					"%s out fifo %d bytes, expected %d\n",
583					ep->ep.name, count, tmp);
584				cleanup = 1;
585			}
586			count = (tmp > 0) ? tmp : 0;
587		}
588
589		is_short = net2272_read_packet(ep, buf, req, count);
590
591		/* completion */
592		if (unlikely(cleanup || is_short ||
593				((req->req.actual == req->req.length)
594				 && !req->req.zero))) {
595
596			if (cleanup) {
597				net2272_out_flush(ep);
598				net2272_done(ep, req, -EOVERFLOW);
599			} else
600				net2272_done(ep, req, 0);
601
602			/* re-initialize endpoint transfer registers
603			 * otherwise they may result in erroneous pre-validation
604			 * for subsequent control reads
605			 */
606			if (unlikely(ep->num == 0)) {
607				net2272_ep_write(ep, EP_TRANSFER2, 0);
608				net2272_ep_write(ep, EP_TRANSFER1, 0);
609				net2272_ep_write(ep, EP_TRANSFER0, 0);
610			}
611
612			if (!list_empty(&ep->queue)) {
613				req = list_entry(ep->queue.next,
614					struct net2272_request, queue);
615				status = net2272_kick_dma(ep, req);
616				if ((status < 0) &&
617				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
618					goto top;
619			}
620			return 1;
621		}
622	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
623
624	return 0;
625}
626
627static void
628net2272_pio_advance(struct net2272_ep *ep)
629{
630	struct net2272_request *req;
631
632	if (unlikely(list_empty(&ep->queue)))
633		return;
634
635	req = list_entry(ep->queue.next, struct net2272_request, queue);
636	(ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req);
637}
638
639/* returns 0 on success, else negative errno */
640static int
641net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
642	unsigned len, unsigned dir)
643{
644	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
645		ep, buf, len, dir);
646
647	/* The NET2272 only supports a single dma channel */
648	if (dev->dma_busy)
649		return -EBUSY;
650	/*
651	 * EP_TRANSFER (used to determine the number of bytes received
652	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
653	 */
654	if ((dir == 1) && (len > 0x1000000))
655		return -EINVAL;
656
657	dev->dma_busy = 1;
658
659	/* initialize platform's dma */
660#ifdef CONFIG_PCI
661	/* NET2272 addr, buffer addr, length, etc. */
662	switch (dev->dev_id) {
663	case PCI_DEVICE_ID_RDK1:
664		/* Setup PLX 9054 DMA mode */
665		writel((1 << LOCAL_BUS_WIDTH) |
666			(1 << TA_READY_INPUT_ENABLE) |
667			(0 << LOCAL_BURST_ENABLE) |
668			(1 << DONE_INTERRUPT_ENABLE) |
669			(1 << LOCAL_ADDRESSING_MODE) |
670			(1 << DEMAND_MODE) |
671			(1 << DMA_EOT_ENABLE) |
672			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
673			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
674			dev->rdk1.plx9054_base_addr + DMAMODE0);
675
676		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
677		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
678		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
679		writel((dir << DIRECTION_OF_TRANSFER) |
680			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
681			dev->rdk1.plx9054_base_addr + DMADPR0);
682		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
683			readl(dev->rdk1.plx9054_base_addr + INTCSR),
684			dev->rdk1.plx9054_base_addr + INTCSR);
685
686		break;
687	}
688#endif
689
690	net2272_write(dev, DMAREQ,
691		(0 << DMA_BUFFER_VALID) |
692		(1 << DMA_REQUEST_ENABLE) |
693		(1 << DMA_CONTROL_DACK) |
694		(dev->dma_eot_polarity << EOT_POLARITY) |
695		(dev->dma_dack_polarity << DACK_POLARITY) |
696		(dev->dma_dreq_polarity << DREQ_POLARITY) |
697		((ep >> 1) << DMA_ENDPOINT_SELECT));
698
699	(void) net2272_read(dev, SCRATCH);
700
701	return 0;
702}
703
704static void
705net2272_start_dma(struct net2272 *dev)
706{
707	/* start platform's dma controller */
708#ifdef CONFIG_PCI
709	switch (dev->dev_id) {
710	case PCI_DEVICE_ID_RDK1:
711		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
712			dev->rdk1.plx9054_base_addr + DMACSR0);
713		break;
714	}
715#endif
716}
717
718/* returns 0 on success, else negative errno */
719static int
720net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
721{
722	unsigned size;
723	u8 tmp;
724
725	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
726		return -EINVAL;
727
728	/* don't use dma for odd-length transfers
729	 * otherwise, we'd need to deal with the last byte with pio
730	 */
731	if (req->req.length & 1)
732		return -EINVAL;
733
734	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
735		ep->ep.name, req, (unsigned long long) req->req.dma);
736
737	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
738
739	/* The NET2272 can only use DMA on one endpoint at a time */
740	if (ep->dev->dma_busy)
741		return -EBUSY;
742
743	/* Make sure we only DMA an even number of bytes (we'll use
744	 * pio to complete the transfer)
745	 */
746	size = req->req.length;
747	size &= ~1;
748
749	/* device-to-host transfer */
750	if (ep->is_in) {
751		/* initialize platform's dma controller */
752		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
753			/* unable to obtain DMA channel; return error and use pio mode */
754			return -EBUSY;
755		req->req.actual += size;
756
757	/* host-to-device transfer */
758	} else {
759		tmp = net2272_ep_read(ep, EP_STAT0);
760
761		/* initialize platform's dma controller */
762		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
763			/* unable to obtain DMA channel; return error and use pio mode */
764			return -EBUSY;
765
766		if (!(tmp & (1 << BUFFER_EMPTY)))
767			ep->not_empty = 1;
768		else
769			ep->not_empty = 0;
770
771
772		/* allow the endpoint's buffer to fill */
773		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
774
775		/* this transfer completed and data's already in the fifo
776		 * return error so pio gets used.
777		 */
778		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
779
780			/* deassert dreq */
781			net2272_write(ep->dev, DMAREQ,
782				(0 << DMA_BUFFER_VALID) |
783				(0 << DMA_REQUEST_ENABLE) |
784				(1 << DMA_CONTROL_DACK) |
785				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
786				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
787				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
788				((ep->num >> 1) << DMA_ENDPOINT_SELECT));
789
790			return -EBUSY;
791		}
792	}
793
794	/* Don't use per-packet interrupts: use dma interrupts only */
795	net2272_ep_write(ep, EP_IRQENB, 0);
796
797	net2272_start_dma(ep->dev);
798
799	return 0;
800}
801
802static void net2272_cancel_dma(struct net2272 *dev)
803{
804#ifdef CONFIG_PCI
805	switch (dev->dev_id) {
806	case PCI_DEVICE_ID_RDK1:
807		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
808		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
809		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
810		         (1 << CHANNEL_DONE)))
811			continue;	/* wait for dma to stabalize */
812
813		/* dma abort generates an interrupt */
814		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
815			dev->rdk1.plx9054_base_addr + DMACSR0);
816		break;
817	}
818#endif
819
820	dev->dma_busy = 0;
821}
822
823/*---------------------------------------------------------------------------*/
824
825static int
826net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
827{
828	struct net2272_request *req;
829	struct net2272_ep *ep;
830	struct net2272 *dev;
831	unsigned long flags;
832	int status = -1;
833	u8 s;
834
835	req = container_of(_req, struct net2272_request, req);
836	if (!_req || !_req->complete || !_req->buf
837			|| !list_empty(&req->queue))
838		return -EINVAL;
839	ep = container_of(_ep, struct net2272_ep, ep);
840	if (!_ep || (!ep->desc && ep->num != 0))
841		return -EINVAL;
842	dev = ep->dev;
843	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
844		return -ESHUTDOWN;
845
846	/* set up dma mapping in case the caller didn't */
847	if (use_dma && ep->dma) {
848		status = usb_gadget_map_request(&dev->gadget, _req,
849				ep->is_in);
850		if (status)
851			return status;
852	}
853
854	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
855		_ep->name, _req, _req->length, _req->buf,
856		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");
857
858	spin_lock_irqsave(&dev->lock, flags);
859
860	_req->status = -EINPROGRESS;
861	_req->actual = 0;
862
863	/* kickstart this i/o queue? */
864	if (list_empty(&ep->queue) && !ep->stopped) {
865		/* maybe there's no control data, just status ack */
866		if (ep->num == 0 && _req->length == 0) {
867			net2272_done(ep, req, 0);
868			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
869			goto done;
870		}
871
872		/* Return zlp, don't let it block subsequent packets */
873		s = net2272_ep_read(ep, EP_STAT0);
874		if (s & (1 << BUFFER_EMPTY)) {
875			/* Buffer is empty check for a blocking zlp, handle it */
876			if ((s & (1 << NAK_OUT_PACKETS)) &&
877			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
878				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
879				/*
880				 * Request is going to terminate with a short packet ...
881				 * hope the client is ready for it!
882				 */
883				status = net2272_read_fifo(ep, req);
884				/* clear short packet naking */
885				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
886				goto done;
887			}
888		}
889
890		/* try dma first */
891		status = net2272_kick_dma(ep, req);
892
893		if (status < 0) {
894			/* dma failed (most likely in use by another endpoint)
895			 * fallback to pio
896			 */
897			status = 0;
898
899			if (ep->is_in)
900				status = net2272_write_fifo(ep, req);
901			else {
902				s = net2272_ep_read(ep, EP_STAT0);
903				if ((s & (1 << BUFFER_EMPTY)) == 0)
904					status = net2272_read_fifo(ep, req);
905			}
906
907			if (unlikely(status != 0)) {
908				if (status > 0)
909					status = 0;
910				req = NULL;
911			}
912		}
913	}
914	if (likely(req))
915		list_add_tail(&req->queue, &ep->queue);
916
917	if (likely(!list_empty(&ep->queue)))
918		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
919 done:
920	spin_unlock_irqrestore(&dev->lock, flags);
921
922	return 0;
923}
924
925/* dequeue ALL requests */
926static void
927net2272_dequeue_all(struct net2272_ep *ep)
928{
929	struct net2272_request *req;
930
931	/* called with spinlock held */
932	ep->stopped = 1;
933
934	while (!list_empty(&ep->queue)) {
935		req = list_entry(ep->queue.next,
936				struct net2272_request,
937				queue);
938		net2272_done(ep, req, -ESHUTDOWN);
939	}
940}
941
942/* dequeue JUST ONE request */
943static int
944net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
945{
946	struct net2272_ep *ep;
947	struct net2272_request *req;
948	unsigned long flags;
949	int stopped;
950
951	ep = container_of(_ep, struct net2272_ep, ep);
952	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
953		return -EINVAL;
954
955	spin_lock_irqsave(&ep->dev->lock, flags);
956	stopped = ep->stopped;
957	ep->stopped = 1;
958
959	/* make sure it's still queued on this endpoint */
960	list_for_each_entry(req, &ep->queue, queue) {
961		if (&req->req == _req)
962			break;
963	}
964	if (&req->req != _req) {
965		spin_unlock_irqrestore(&ep->dev->lock, flags);
966		return -EINVAL;
967	}
968
969	/* queue head may be partially complete */
970	if (ep->queue.next == &req->queue) {
971		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
972		net2272_done(ep, req, -ECONNRESET);
973	}
974	req = NULL;
975	ep->stopped = stopped;
976
977	spin_unlock_irqrestore(&ep->dev->lock, flags);
978	return 0;
979}
980
981/*---------------------------------------------------------------------------*/
982
983static int
984net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
985{
986	struct net2272_ep *ep;
987	unsigned long flags;
988	int ret = 0;
989
990	ep = container_of(_ep, struct net2272_ep, ep);
991	if (!_ep || (!ep->desc && ep->num != 0))
992		return -EINVAL;
993	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
994		return -ESHUTDOWN;
995	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
996		return -EINVAL;
997
998	spin_lock_irqsave(&ep->dev->lock, flags);
999	if (!list_empty(&ep->queue))
1000		ret = -EAGAIN;
1001	else if (ep->is_in && value && net2272_fifo_status(_ep) != 0)
1002		ret = -EAGAIN;
1003	else {
1004		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
1005			value ? "set" : "clear",
1006			wedged ? "wedge" : "halt");
1007		/* set/clear */
1008		if (value) {
1009			if (ep->num == 0)
1010				ep->dev->protocol_stall = 1;
1011			else
1012				set_halt(ep);
1013			if (wedged)
1014				ep->wedged = 1;
1015		} else {
1016			clear_halt(ep);
1017			ep->wedged = 0;
1018		}
1019	}
1020	spin_unlock_irqrestore(&ep->dev->lock, flags);
1021
1022	return ret;
1023}
1024
1025static int
1026net2272_set_halt(struct usb_ep *_ep, int value)
1027{
1028	return net2272_set_halt_and_wedge(_ep, value, 0);
1029}
1030
1031static int
1032net2272_set_wedge(struct usb_ep *_ep)
1033{
1034	if (!_ep || _ep->name == ep0name)
1035		return -EINVAL;
1036	return net2272_set_halt_and_wedge(_ep, 1, 1);
1037}
1038
1039static int
1040net2272_fifo_status(struct usb_ep *_ep)
1041{
1042	struct net2272_ep *ep;
1043	u16 avail;
1044
1045	ep = container_of(_ep, struct net2272_ep, ep);
1046	if (!_ep || (!ep->desc && ep->num != 0))
1047		return -ENODEV;
1048	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1049		return -ESHUTDOWN;
1050
1051	avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
1052	avail |= net2272_ep_read(ep, EP_AVAIL0);
1053	if (avail > ep->fifo_size)
1054		return -EOVERFLOW;
1055	if (ep->is_in)
1056		avail = ep->fifo_size - avail;
1057	return avail;
1058}
1059
1060static void
1061net2272_fifo_flush(struct usb_ep *_ep)
1062{
1063	struct net2272_ep *ep;
1064
1065	ep = container_of(_ep, struct net2272_ep, ep);
1066	if (!_ep || (!ep->desc && ep->num != 0))
1067		return;
1068	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1069		return;
1070
1071	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
1072}
1073
1074static struct usb_ep_ops net2272_ep_ops = {
1075	.enable        = net2272_enable,
1076	.disable       = net2272_disable,
1077
1078	.alloc_request = net2272_alloc_request,
1079	.free_request  = net2272_free_request,
1080
1081	.queue         = net2272_queue,
1082	.dequeue       = net2272_dequeue,
1083
1084	.set_halt      = net2272_set_halt,
1085	.set_wedge     = net2272_set_wedge,
1086	.fifo_status   = net2272_fifo_status,
1087	.fifo_flush    = net2272_fifo_flush,
1088};
1089
1090/*---------------------------------------------------------------------------*/
1091
1092static int
1093net2272_get_frame(struct usb_gadget *_gadget)
1094{
1095	struct net2272 *dev;
1096	unsigned long flags;
1097	u16 ret;
1098
1099	if (!_gadget)
1100		return -ENODEV;
1101	dev = container_of(_gadget, struct net2272, gadget);
1102	spin_lock_irqsave(&dev->lock, flags);
1103
1104	ret = net2272_read(dev, FRAME1) << 8;
1105	ret |= net2272_read(dev, FRAME0);
1106
1107	spin_unlock_irqrestore(&dev->lock, flags);
1108	return ret;
1109}
1110
1111static int
1112net2272_wakeup(struct usb_gadget *_gadget)
1113{
1114	struct net2272 *dev;
1115	u8 tmp;
1116	unsigned long flags;
1117
1118	if (!_gadget)
1119		return 0;
1120	dev = container_of(_gadget, struct net2272, gadget);
1121
1122	spin_lock_irqsave(&dev->lock, flags);
1123	tmp = net2272_read(dev, USBCTL0);
1124	if (tmp & (1 << IO_WAKEUP_ENABLE))
1125		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
1126
1127	spin_unlock_irqrestore(&dev->lock, flags);
1128
1129	return 0;
1130}
1131
1132static int
1133net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
1134{
1135	if (!_gadget)
1136		return -ENODEV;
1137
1138	_gadget->is_selfpowered = (value != 0);
1139
1140	return 0;
1141}
1142
1143static int
1144net2272_pullup(struct usb_gadget *_gadget, int is_on)
1145{
1146	struct net2272 *dev;
1147	u8 tmp;
1148	unsigned long flags;
1149
1150	if (!_gadget)
1151		return -ENODEV;
1152	dev = container_of(_gadget, struct net2272, gadget);
1153
1154	spin_lock_irqsave(&dev->lock, flags);
1155	tmp = net2272_read(dev, USBCTL0);
1156	dev->softconnect = (is_on != 0);
1157	if (is_on)
1158		tmp |= (1 << USB_DETECT_ENABLE);
1159	else
1160		tmp &= ~(1 << USB_DETECT_ENABLE);
1161	net2272_write(dev, USBCTL0, tmp);
1162	spin_unlock_irqrestore(&dev->lock, flags);
1163
1164	return 0;
1165}
1166
1167static int net2272_start(struct usb_gadget *_gadget,
1168		struct usb_gadget_driver *driver);
1169static int net2272_stop(struct usb_gadget *_gadget);
1170
1171static const struct usb_gadget_ops net2272_ops = {
1172	.get_frame	= net2272_get_frame,
1173	.wakeup		= net2272_wakeup,
1174	.set_selfpowered = net2272_set_selfpowered,
1175	.pullup		= net2272_pullup,
1176	.udc_start	= net2272_start,
1177	.udc_stop	= net2272_stop,
1178};
1179
1180/*---------------------------------------------------------------------------*/
1181
1182static ssize_t
1183registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
1184{
1185	struct net2272 *dev;
1186	char *next;
1187	unsigned size, t;
1188	unsigned long flags;
1189	u8 t1, t2;
1190	int i;
1191	const char *s;
1192
1193	dev = dev_get_drvdata(_dev);
1194	next = buf;
1195	size = PAGE_SIZE;
1196	spin_lock_irqsave(&dev->lock, flags);
1197
1198	if (dev->driver)
1199		s = dev->driver->driver.name;
1200	else
1201		s = "(none)";
1202
1203	/* Main Control Registers */
1204	t = scnprintf(next, size, "%s version %s,"
1205		"chiprev %02x, locctl %02x\n"
1206		"irqenb0 %02x irqenb1 %02x "
1207		"irqstat0 %02x irqstat1 %02x\n",
1208		driver_name, driver_vers, dev->chiprev,
1209		net2272_read(dev, LOCCTL),
1210		net2272_read(dev, IRQENB0),
1211		net2272_read(dev, IRQENB1),
1212		net2272_read(dev, IRQSTAT0),
1213		net2272_read(dev, IRQSTAT1));
1214	size -= t;
1215	next += t;
1216
1217	/* DMA */
1218	t1 = net2272_read(dev, DMAREQ);
1219	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
1220		t1, ep_name[(t1 & 0x01) + 1],
1221		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
1222		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
1223		t1 & (1 << DMA_REQUEST) ? "req " : "",
1224		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
1225	size -= t;
1226	next += t;
1227
1228	/* USB Control Registers */
1229	t1 = net2272_read(dev, USBCTL1);
1230	if (t1 & (1 << VBUS_PIN)) {
1231		if (t1 & (1 << USB_HIGH_SPEED))
1232			s = "high speed";
1233		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1234			s = "powered";
1235		else
1236			s = "full speed";
1237	} else
1238		s = "not attached";
1239	t = scnprintf(next, size,
1240		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
1241		net2272_read(dev, USBCTL0), t1,
1242		net2272_read(dev, OURADDR), s);
1243	size -= t;
1244	next += t;
1245
1246	/* Endpoint Registers */
1247	for (i = 0; i < 4; ++i) {
1248		struct net2272_ep *ep;
1249
1250		ep = &dev->ep[i];
1251		if (i && !ep->desc)
1252			continue;
1253
1254		t1 = net2272_ep_read(ep, EP_CFG);
1255		t2 = net2272_ep_read(ep, EP_RSPSET);
1256		t = scnprintf(next, size,
1257			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
1258			"irqenb %02x\n",
1259			ep->ep.name, t1, t2,
1260			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
1261			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
1262			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
1263			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
1264			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
1265			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
1266			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
1267			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
1268			net2272_ep_read(ep, EP_IRQENB));
1269		size -= t;
1270		next += t;
1271
1272		t = scnprintf(next, size,
1273			"\tstat0 %02x stat1 %02x avail %04x "
1274			"(ep%d%s-%s)%s\n",
1275			net2272_ep_read(ep, EP_STAT0),
1276			net2272_ep_read(ep, EP_STAT1),
1277			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
1278			t1 & 0x0f,
1279			ep->is_in ? "in" : "out",
1280			type_string(t1 >> 5),
1281			ep->stopped ? "*" : "");
1282		size -= t;
1283		next += t;
1284
1285		t = scnprintf(next, size,
1286			"\tep_transfer %06x\n",
1287			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
1288			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
1289			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
1290		size -= t;
1291		next += t;
1292
1293		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
1294		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
1295		t = scnprintf(next, size,
1296			"\tbuf-a %s buf-b %s\n",
1297			buf_state_string(t1),
1298			buf_state_string(t2));
1299		size -= t;
1300		next += t;
1301	}
1302
1303	spin_unlock_irqrestore(&dev->lock, flags);
1304
1305	return PAGE_SIZE - size;
1306}
1307static DEVICE_ATTR_RO(registers);
1308
1309/*---------------------------------------------------------------------------*/
1310
1311static void
1312net2272_set_fifo_mode(struct net2272 *dev, int mode)
1313{
1314	u8 tmp;
1315
1316	tmp = net2272_read(dev, LOCCTL) & 0x3f;
1317	tmp |= (mode << 6);
1318	net2272_write(dev, LOCCTL, tmp);
1319
1320	INIT_LIST_HEAD(&dev->gadget.ep_list);
1321
1322	/* always ep-a, ep-c ... maybe not ep-b */
1323	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1324
1325	switch (mode) {
1326	case 0:
1327		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1328		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 512;
1329		break;
1330	case 1:
1331		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1332		dev->ep[1].fifo_size = 1024;
1333		dev->ep[2].fifo_size = 512;
1334		break;
1335	case 2:
1336		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1337		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1338		break;
1339	case 3:
1340		dev->ep[1].fifo_size = 1024;
1341		break;
1342	}
1343
1344	/* ep-c is always 2 512 byte buffers */
1345	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1346	dev->ep[3].fifo_size = 512;
1347}
1348
1349/*---------------------------------------------------------------------------*/
1350
1351static void
1352net2272_usb_reset(struct net2272 *dev)
1353{
1354	dev->gadget.speed = USB_SPEED_UNKNOWN;
1355
1356	net2272_cancel_dma(dev);
1357
1358	net2272_write(dev, IRQENB0, 0);
1359	net2272_write(dev, IRQENB1, 0);
1360
1361	/* clear irq state */
1362	net2272_write(dev, IRQSTAT0, 0xff);
1363	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));
1364
1365	net2272_write(dev, DMAREQ,
1366		(0 << DMA_BUFFER_VALID) |
1367		(0 << DMA_REQUEST_ENABLE) |
1368		(1 << DMA_CONTROL_DACK) |
1369		(dev->dma_eot_polarity << EOT_POLARITY) |
1370		(dev->dma_dack_polarity << DACK_POLARITY) |
1371		(dev->dma_dreq_polarity << DREQ_POLARITY) |
1372		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));
1373
1374	net2272_cancel_dma(dev);
1375	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);
1376
1377	/* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping
1378	 * note that the higher level gadget drivers are expected to convert data to little endian.
1379	 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here
1380	 */
1381	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
1382	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
1383}
1384
1385static void
1386net2272_usb_reinit(struct net2272 *dev)
1387{
1388	int i;
1389
1390	/* basic endpoint init */
1391	for (i = 0; i < 4; ++i) {
1392		struct net2272_ep *ep = &dev->ep[i];
1393
1394		ep->ep.name = ep_name[i];
1395		ep->dev = dev;
1396		ep->num = i;
1397		ep->not_empty = 0;
1398
1399		if (use_dma && ep->num == dma_ep)
1400			ep->dma = 1;
1401
1402		if (i > 0 && i <= 3)
1403			ep->fifo_size = 512;
1404		else
1405			ep->fifo_size = 64;
1406		net2272_ep_reset(ep);
1407
1408		if (i == 0) {
1409			ep->ep.caps.type_control = true;
1410		} else {
1411			ep->ep.caps.type_iso = true;
1412			ep->ep.caps.type_bulk = true;
1413			ep->ep.caps.type_int = true;
1414		}
1415
1416		ep->ep.caps.dir_in = true;
1417		ep->ep.caps.dir_out = true;
1418	}
1419	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1420
1421	dev->gadget.ep0 = &dev->ep[0].ep;
1422	dev->ep[0].stopped = 0;
1423	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1424}
1425
1426static void
1427net2272_ep0_start(struct net2272 *dev)
1428{
1429	struct net2272_ep *ep0 = &dev->ep[0];
1430
1431	net2272_ep_write(ep0, EP_RSPSET,
1432		(1 << NAK_OUT_PACKETS_MODE) |
1433		(1 << ALT_NAK_OUT_PACKETS));
1434	net2272_ep_write(ep0, EP_RSPCLR,
1435		(1 << HIDE_STATUS_PHASE) |
1436		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
1437	net2272_write(dev, USBCTL0,
1438		(dev->softconnect << USB_DETECT_ENABLE) |
1439		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
1440		(1 << IO_WAKEUP_ENABLE));
1441	net2272_write(dev, IRQENB0,
1442		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
1443		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
1444		(1 << DMA_DONE_INTERRUPT_ENABLE));
1445	net2272_write(dev, IRQENB1,
1446		(1 << VBUS_INTERRUPT_ENABLE) |
1447		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
1448		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
1449}
1450
1451/* when a driver is successfully registered, it will receive
1452 * control requests including set_configuration(), which enables
1453 * non-control requests.  then usb traffic follows until a
1454 * disconnect is reported.  then a host may connect again, or
1455 * the driver might get unbound.
1456 */
1457static int net2272_start(struct usb_gadget *_gadget,
1458		struct usb_gadget_driver *driver)
1459{
1460	struct net2272 *dev;
1461	unsigned i;
1462
1463	if (!driver || !driver->setup ||
1464	    driver->max_speed != USB_SPEED_HIGH)
1465		return -EINVAL;
1466
1467	dev = container_of(_gadget, struct net2272, gadget);
1468
1469	for (i = 0; i < 4; ++i)
1470		dev->ep[i].irqs = 0;
1471	/* hook up the driver ... */
1472	dev->softconnect = 1;
1473	driver->driver.bus = NULL;
1474	dev->driver = driver;
1475
1476	/* ... then enable host detection and ep0; and we're ready
1477	 * for set_configuration as well as eventual disconnect.
1478	 */
1479	net2272_ep0_start(dev);
1480
1481	return 0;
1482}
1483
1484static void
1485stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
1486{
1487	int i;
1488
1489	/* don't disconnect if it's not connected */
1490	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1491		driver = NULL;
1492
1493	/* stop hardware; prevent new request submissions;
1494	 * and kill any outstanding requests.
1495	 */
1496	net2272_usb_reset(dev);
1497	for (i = 0; i < 4; ++i)
1498		net2272_dequeue_all(&dev->ep[i]);
1499
1500	/* report disconnect; the driver is already quiesced */
1501	if (driver) {
1502		spin_unlock(&dev->lock);
1503		driver->disconnect(&dev->gadget);
1504		spin_lock(&dev->lock);
1505	}
1506
1507	net2272_usb_reinit(dev);
1508}
1509
1510static int net2272_stop(struct usb_gadget *_gadget)
1511{
1512	struct net2272 *dev;
1513	unsigned long flags;
1514
1515	dev = container_of(_gadget, struct net2272, gadget);
1516
1517	spin_lock_irqsave(&dev->lock, flags);
1518	stop_activity(dev, NULL);
1519	spin_unlock_irqrestore(&dev->lock, flags);
1520
1521	dev->driver = NULL;
1522
1523	return 0;
1524}
1525
1526/*---------------------------------------------------------------------------*/
1527/* handle ep-a/ep-b dma completions */
1528static void
1529net2272_handle_dma(struct net2272_ep *ep)
1530{
1531	struct net2272_request *req;
1532	unsigned len;
1533	int status;
1534
1535	if (!list_empty(&ep->queue))
1536		req = list_entry(ep->queue.next,
1537				struct net2272_request, queue);
1538	else
1539		req = NULL;
1540
1541	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);
1542
1543	/* Ensure DREQ is de-asserted */
1544	net2272_write(ep->dev, DMAREQ,
1545		(0 << DMA_BUFFER_VALID)
1546	      | (0 << DMA_REQUEST_ENABLE)
1547	      | (1 << DMA_CONTROL_DACK)
1548	      | (ep->dev->dma_eot_polarity << EOT_POLARITY)
1549	      | (ep->dev->dma_dack_polarity << DACK_POLARITY)
1550	      | (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
1551	      | (ep->dma << DMA_ENDPOINT_SELECT));
1552
1553	ep->dev->dma_busy = 0;
1554
1555	net2272_ep_write(ep, EP_IRQENB,
1556		  (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1557		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1558		| net2272_ep_read(ep, EP_IRQENB));
1559
1560	/* device-to-host transfer completed */
1561	if (ep->is_in) {
1562		/* validate a short packet or zlp if necessary */
1563		if ((req->req.length % ep->ep.maxpacket != 0) ||
1564				req->req.zero)
1565			set_fifo_bytecount(ep, 0);
1566
1567		net2272_done(ep, req, 0);
1568		if (!list_empty(&ep->queue)) {
1569			req = list_entry(ep->queue.next,
1570					struct net2272_request, queue);
1571			status = net2272_kick_dma(ep, req);
1572			if (status < 0)
1573				net2272_pio_advance(ep);
1574		}
1575
1576	/* host-to-device transfer completed */
1577	} else {
1578		/* terminated with a short packet? */
1579		if (net2272_read(ep->dev, IRQSTAT0) &
1580				(1 << DMA_DONE_INTERRUPT)) {
1581			/* abort system dma */
1582			net2272_cancel_dma(ep->dev);
1583		}
1584
1585		/* EP_TRANSFER will contain the number of bytes
1586		 * actually received.
1587		 * NOTE: There is no overflow detection on EP_TRANSFER:
1588		 * We can't deal with transfers larger than 2^24 bytes!
1589		 */
1590		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
1591			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
1592			| (net2272_ep_read(ep, EP_TRANSFER0));
1593
1594		if (ep->not_empty)
1595			len += 4;
1596
1597		req->req.actual += len;
1598
1599		/* get any remaining data */
1600		net2272_pio_advance(ep);
1601	}
1602}
1603
1604/*---------------------------------------------------------------------------*/
1605
1606static void
1607net2272_handle_ep(struct net2272_ep *ep)
1608{
1609	struct net2272_request *req;
1610	u8 stat0, stat1;
1611
1612	if (!list_empty(&ep->queue))
1613		req = list_entry(ep->queue.next,
1614			struct net2272_request, queue);
1615	else
1616		req = NULL;
1617
1618	/* ack all, and handle what we care about */
1619	stat0 = net2272_ep_read(ep, EP_STAT0);
1620	stat1 = net2272_ep_read(ep, EP_STAT1);
1621	ep->irqs++;
1622
1623	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
1624		ep->ep.name, stat0, stat1, req ? &req->req : NULL);
1625
1626	net2272_ep_write(ep, EP_STAT0, stat0 &
1627		~((1 << NAK_OUT_PACKETS)
1628		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
1629	net2272_ep_write(ep, EP_STAT1, stat1);
1630
1631	/* data packet(s) received (in the fifo, OUT)
1632	 * direction must be validated, otherwise control read status phase
1633	 * could be interpreted as a valid packet
1634	 */
1635	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
1636		net2272_pio_advance(ep);
1637	/* data packet(s) transmitted (IN) */
1638	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
1639		net2272_pio_advance(ep);
1640}
1641
1642static struct net2272_ep *
1643net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
1644{
1645	struct net2272_ep *ep;
1646
1647	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1648		return &dev->ep[0];
1649
1650	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1651		u8 bEndpointAddress;
1652
1653		if (!ep->desc)
1654			continue;
1655		bEndpointAddress = ep->desc->bEndpointAddress;
1656		if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1657			continue;
1658		if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
1659			return ep;
1660	}
1661	return NULL;
1662}
1663
1664/*
1665 * USB Test Packet:
1666 * JKJKJKJK * 9
1667 * JJKKJJKK * 8
1668 * JJJJKKKK * 8
1669 * JJJJJJJKKKKKKK * 8
1670 * JJJJJJJK * 8
1671 * {JKKKKKKK * 10}, JK
1672 */
1673static const u8 net2272_test_packet[] = {
1674	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1675	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
1676	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
1677	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1678	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
1679	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
1680};
1681
1682static void
1683net2272_set_test_mode(struct net2272 *dev, int mode)
1684{
1685	int i;
1686
1687	/* Disable all net2272 interrupts:
1688	 * Nothing but a power cycle should stop the test.
1689	 */
1690	net2272_write(dev, IRQENB0, 0x00);
1691	net2272_write(dev, IRQENB1, 0x00);
1692
1693	/* Force tranceiver to high-speed */
1694	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);
1695
1696	net2272_write(dev, PAGESEL, 0);
1697	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
1698	net2272_write(dev, EP_RSPCLR,
1699			  (1 << CONTROL_STATUS_PHASE_HANDSHAKE)
1700			| (1 << HIDE_STATUS_PHASE));
1701	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
1702	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);
1703
1704	/* wait for status phase to complete */
1705	while (!(net2272_read(dev, EP_STAT0) &
1706				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
1707		;
1708
1709	/* Enable test mode */
1710	net2272_write(dev, USBTEST, mode);
1711
1712	/* load test packet */
1713	if (mode == TEST_PACKET) {
1714		/* switch to 8 bit mode */
1715		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
1716				~(1 << DATA_WIDTH));
1717
1718		for (i = 0; i < sizeof(net2272_test_packet); ++i)
1719			net2272_write(dev, EP_DATA, net2272_test_packet[i]);
1720
1721		/* Validate test packet */
1722		net2272_write(dev, EP_TRANSFER0, 0);
1723	}
1724}
1725
1726static void
1727net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
1728{
1729	struct net2272_ep *ep;
1730	u8 num, scratch;
1731
1732	/* starting a control request? */
1733	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
1734		union {
1735			u8 raw[8];
1736			struct usb_ctrlrequest	r;
1737		} u;
1738		int tmp = 0;
1739		struct net2272_request *req;
1740
1741		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
1742			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
1743				dev->gadget.speed = USB_SPEED_HIGH;
1744			else
1745				dev->gadget.speed = USB_SPEED_FULL;
1746			dev_dbg(dev->dev, "%s\n",
1747				usb_speed_string(dev->gadget.speed));
1748		}
1749
1750		ep = &dev->ep[0];
1751		ep->irqs++;
1752
1753		/* make sure any leftover interrupt state is cleared */
1754		stat &= ~(1 << ENDPOINT_0_INTERRUPT);
1755		while (!list_empty(&ep->queue)) {
1756			req = list_entry(ep->queue.next,
1757				struct net2272_request, queue);
1758			net2272_done(ep, req,
1759				(req->req.actual == req->req.length) ? 0 : -EPROTO);
1760		}
1761		ep->stopped = 0;
1762		dev->protocol_stall = 0;
1763		net2272_ep_write(ep, EP_STAT0,
1764			    (1 << DATA_IN_TOKEN_INTERRUPT)
1765			  | (1 << DATA_OUT_TOKEN_INTERRUPT)
1766			  | (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
1767			  | (1 << DATA_PACKET_RECEIVED_INTERRUPT)
1768			  | (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
1769		net2272_ep_write(ep, EP_STAT1,
1770			    (1 << TIMEOUT)
1771			  | (1 << USB_OUT_ACK_SENT)
1772			  | (1 << USB_OUT_NAK_SENT)
1773			  | (1 << USB_IN_ACK_RCVD)
1774			  | (1 << USB_IN_NAK_SENT)
1775			  | (1 << USB_STALL_SENT)
1776			  | (1 << LOCAL_OUT_ZLP));
1777
1778		/*
1779		 * Ensure Control Read pre-validation setting is beyond maximum size
1780		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
1781		 *    an EP0 transfer following the Control Write is a Control Read,
1782		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
1783		 *    pre-validation count.
1784		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
1785		 *    the pre-validation count cannot cause an unexpected validatation
1786		 */
1787		net2272_write(dev, PAGESEL, 0);
1788		net2272_write(dev, EP_TRANSFER2, 0xff);
1789		net2272_write(dev, EP_TRANSFER1, 0xff);
1790		net2272_write(dev, EP_TRANSFER0, 0xff);
1791
1792		u.raw[0] = net2272_read(dev, SETUP0);
1793		u.raw[1] = net2272_read(dev, SETUP1);
1794		u.raw[2] = net2272_read(dev, SETUP2);
1795		u.raw[3] = net2272_read(dev, SETUP3);
1796		u.raw[4] = net2272_read(dev, SETUP4);
1797		u.raw[5] = net2272_read(dev, SETUP5);
1798		u.raw[6] = net2272_read(dev, SETUP6);
1799		u.raw[7] = net2272_read(dev, SETUP7);
1800		/*
1801		 * If you have a big endian cpu make sure le16_to_cpus
1802		 * performs the proper byte swapping here...
1803		 */
1804		le16_to_cpus(&u.r.wValue);
1805		le16_to_cpus(&u.r.wIndex);
1806		le16_to_cpus(&u.r.wLength);
1807
1808		/* ack the irq */
1809		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
1810		stat ^= (1 << SETUP_PACKET_INTERRUPT);
1811
1812		/* watch control traffic at the token level, and force
1813		 * synchronization before letting the status phase happen.
1814		 */
1815		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
1816		if (ep->is_in) {
1817			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
1818				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1819				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1820			stop_out_naking(ep);
1821		} else
1822			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
1823				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
1824				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
1825		net2272_ep_write(ep, EP_IRQENB, scratch);
1826
1827		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
1828			goto delegate;
1829		switch (u.r.bRequest) {
1830		case USB_REQ_GET_STATUS: {
1831			struct net2272_ep *e;
1832			u16 status = 0;
1833
1834			switch (u.r.bRequestType & USB_RECIP_MASK) {
1835			case USB_RECIP_ENDPOINT:
1836				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1837				if (!e || u.r.wLength > 2)
1838					goto do_stall;
1839				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
1840					status = cpu_to_le16(1);
1841				else
1842					status = cpu_to_le16(0);
1843
1844				/* don't bother with a request object! */
1845				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1846				writew(status, net2272_reg_addr(dev, EP_DATA));
1847				set_fifo_bytecount(&dev->ep[0], 0);
1848				allow_status(ep);
1849				dev_vdbg(dev->dev, "%s stat %02x\n",
1850					ep->ep.name, status);
1851				goto next_endpoints;
1852			case USB_RECIP_DEVICE:
1853				if (u.r.wLength > 2)
1854					goto do_stall;
1855				if (dev->gadget.is_selfpowered)
1856					status = (1 << USB_DEVICE_SELF_POWERED);
1857
1858				/* don't bother with a request object! */
1859				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1860				writew(status, net2272_reg_addr(dev, EP_DATA));
1861				set_fifo_bytecount(&dev->ep[0], 0);
1862				allow_status(ep);
1863				dev_vdbg(dev->dev, "device stat %02x\n", status);
1864				goto next_endpoints;
1865			case USB_RECIP_INTERFACE:
1866				if (u.r.wLength > 2)
1867					goto do_stall;
1868
1869				/* don't bother with a request object! */
1870				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
1871				writew(status, net2272_reg_addr(dev, EP_DATA));
1872				set_fifo_bytecount(&dev->ep[0], 0);
1873				allow_status(ep);
1874				dev_vdbg(dev->dev, "interface status %02x\n", status);
1875				goto next_endpoints;
1876			}
1877
1878			break;
1879		}
1880		case USB_REQ_CLEAR_FEATURE: {
1881			struct net2272_ep *e;
1882
1883			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1884				goto delegate;
1885			if (u.r.wValue != USB_ENDPOINT_HALT ||
1886			    u.r.wLength != 0)
1887				goto do_stall;
1888			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1889			if (!e)
1890				goto do_stall;
1891			if (e->wedged) {
1892				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
1893					ep->ep.name);
1894			} else {
1895				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
1896				clear_halt(e);
1897			}
1898			allow_status(ep);
1899			goto next_endpoints;
1900		}
1901		case USB_REQ_SET_FEATURE: {
1902			struct net2272_ep *e;
1903
1904			if (u.r.bRequestType == USB_RECIP_DEVICE) {
1905				if (u.r.wIndex != NORMAL_OPERATION)
1906					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
1907				allow_status(ep);
1908				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
1909				goto next_endpoints;
1910			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
1911				goto delegate;
1912			if (u.r.wValue != USB_ENDPOINT_HALT ||
1913			    u.r.wLength != 0)
1914				goto do_stall;
1915			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
1916			if (!e)
1917				goto do_stall;
1918			set_halt(e);
1919			allow_status(ep);
1920			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
1921			goto next_endpoints;
1922		}
1923		case USB_REQ_SET_ADDRESS: {
1924			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
1925			allow_status(ep);
1926			break;
1927		}
1928		default:
1929 delegate:
1930			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
1931				"ep_cfg %08x\n",
1932				u.r.bRequestType, u.r.bRequest,
1933				u.r.wValue, u.r.wIndex,
1934				net2272_ep_read(ep, EP_CFG));
1935			spin_unlock(&dev->lock);
1936			tmp = dev->driver->setup(&dev->gadget, &u.r);
1937			spin_lock(&dev->lock);
1938		}
1939
1940		/* stall ep0 on error */
1941		if (tmp < 0) {
1942 do_stall:
1943			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
1944				u.r.bRequestType, u.r.bRequest, tmp);
1945			dev->protocol_stall = 1;
1946		}
1947	/* endpoint dma irq? */
1948	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
1949		net2272_cancel_dma(dev);
1950		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
1951		stat &= ~(1 << DMA_DONE_INTERRUPT);
1952		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
1953			? 2 : 1;
1954
1955		ep = &dev->ep[num];
1956		net2272_handle_dma(ep);
1957	}
1958
1959 next_endpoints:
1960	/* endpoint data irq? */
1961	scratch = stat & 0x0f;
1962	stat &= ~0x0f;
1963	for (num = 0; scratch; num++) {
1964		u8 t;
1965
1966		/* does this endpoint's FIFO and queue need tending? */
1967		t = 1 << num;
1968		if ((scratch & t) == 0)
1969			continue;
1970		scratch ^= t;
1971
1972		ep = &dev->ep[num];
1973		net2272_handle_ep(ep);
1974	}
1975
1976	/* some interrupts we can just ignore */
1977	stat &= ~(1 << SOF_INTERRUPT);
1978
1979	if (stat)
1980		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
1981}
1982
1983static void
1984net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
1985{
1986	u8 tmp, mask;
1987
1988	/* after disconnect there's nothing else to do! */
1989	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
1990	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
1991
1992	if (stat & tmp) {
1993		bool	reset = false;
1994		bool	disconnect = false;
1995
1996		/*
1997		 * Ignore disconnects and resets if the speed hasn't been set.
1998		 * VBUS can bounce and there's always an initial reset.
1999		 */
2000		net2272_write(dev, IRQSTAT1, tmp);
2001		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
2002			if ((stat & (1 << VBUS_INTERRUPT)) &&
2003					(net2272_read(dev, USBCTL1) &
2004						(1 << VBUS_PIN)) == 0) {
2005				disconnect = true;
2006				dev_dbg(dev->dev, "disconnect %s\n",
2007					dev->driver->driver.name);
2008			} else if ((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
2009					(net2272_read(dev, USBCTL1) & mask)
2010						== 0) {
2011				reset = true;
2012				dev_dbg(dev->dev, "reset %s\n",
2013					dev->driver->driver.name);
2014			}
2015
2016			if (disconnect || reset) {
2017				stop_activity(dev, dev->driver);
2018				net2272_ep0_start(dev);
2019				spin_unlock(&dev->lock);
2020				if (reset)
2021					usb_gadget_udc_reset
2022						(&dev->gadget, dev->driver);
2023				else
2024					(dev->driver->disconnect)
2025						(&dev->gadget);
2026				spin_lock(&dev->lock);
2027				return;
2028			}
2029		}
2030		stat &= ~tmp;
2031
2032		if (!stat)
2033			return;
2034	}
2035
2036	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
2037	if (stat & tmp) {
2038		net2272_write(dev, IRQSTAT1, tmp);
2039		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
2040			if (dev->driver->suspend)
2041				dev->driver->suspend(&dev->gadget);
2042			if (!enable_suspend) {
2043				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
2044				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
2045			}
2046		} else {
2047			if (dev->driver->resume)
2048				dev->driver->resume(&dev->gadget);
2049		}
2050		stat &= ~tmp;
2051	}
2052
2053	/* clear any other status/irqs */
2054	if (stat)
2055		net2272_write(dev, IRQSTAT1, stat);
2056
2057	/* some status we can just ignore */
2058	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
2059			| (1 << SUSPEND_REQUEST_INTERRUPT)
2060			| (1 << RESUME_INTERRUPT));
2061	if (!stat)
2062		return;
2063	else
2064		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
2065}
2066
2067static irqreturn_t net2272_irq(int irq, void *_dev)
2068{
2069	struct net2272 *dev = _dev;
2070#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
2071	u32 intcsr;
2072#endif
2073#if defined(PLX_PCI_RDK)
2074	u8 dmareq;
2075#endif
2076	spin_lock(&dev->lock);
2077#if defined(PLX_PCI_RDK)
2078	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2079
2080	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
2081		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
2082				dev->rdk1.plx9054_base_addr + INTCSR);
2083		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2084		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2085		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
2086		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
2087			dev->rdk1.plx9054_base_addr + INTCSR);
2088	}
2089	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
2090		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2091				dev->rdk1.plx9054_base_addr + DMACSR0);
2092
2093		dmareq = net2272_read(dev, DMAREQ);
2094		if (dmareq & 0x01)
2095			net2272_handle_dma(&dev->ep[2]);
2096		else
2097			net2272_handle_dma(&dev->ep[1]);
2098	}
2099#endif
2100#if defined(PLX_PCI_RDK2)
2101	/* see if PCI int for us by checking irqstat */
2102	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2103	if (!intcsr & (1 << NET2272_PCI_IRQ)) {
2104		spin_unlock(&dev->lock);
2105		return IRQ_NONE;
2106	}
2107	/* check dma interrupts */
2108#endif
2109	/* Platform/devcice interrupt handler */
2110#if !defined(PLX_PCI_RDK)
2111	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
2112	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
2113#endif
2114	spin_unlock(&dev->lock);
2115
2116	return IRQ_HANDLED;
2117}
2118
2119static int net2272_present(struct net2272 *dev)
2120{
2121	/*
2122	 * Quick test to see if CPU can communicate properly with the NET2272.
2123	 * Verifies connection using writes and reads to write/read and
2124	 * read-only registers.
2125	 *
2126	 * This routine is strongly recommended especially during early bring-up
2127	 * of new hardware, however for designs that do not apply Power On System
2128	 * Tests (POST) it may discarded (or perhaps minimized).
2129	 */
2130	unsigned int ii;
2131	u8 val, refval;
2132
2133	/* Verify NET2272 write/read SCRATCH register can write and read */
2134	refval = net2272_read(dev, SCRATCH);
2135	for (ii = 0; ii < 0x100; ii += 7) {
2136		net2272_write(dev, SCRATCH, ii);
2137		val = net2272_read(dev, SCRATCH);
2138		if (val != ii) {
2139			dev_dbg(dev->dev,
2140				"%s: write/read SCRATCH register test failed: "
2141				"wrote:0x%2.2x, read:0x%2.2x\n",
2142				__func__, ii, val);
2143			return -EINVAL;
2144		}
2145	}
2146	/* To be nice, we write the original SCRATCH value back: */
2147	net2272_write(dev, SCRATCH, refval);
2148
2149	/* Verify NET2272 CHIPREV register is read-only: */
2150	refval = net2272_read(dev, CHIPREV_2272);
2151	for (ii = 0; ii < 0x100; ii += 7) {
2152		net2272_write(dev, CHIPREV_2272, ii);
2153		val = net2272_read(dev, CHIPREV_2272);
2154		if (val != refval) {
2155			dev_dbg(dev->dev,
2156				"%s: write/read CHIPREV register test failed: "
2157				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
2158				__func__, ii, val, refval);
2159			return -EINVAL;
2160		}
2161	}
2162
2163	/*
2164	 * Verify NET2272's "NET2270 legacy revision" register
2165	 *  - NET2272 has two revision registers. The NET2270 legacy revision
2166	 *    register should read the same value, regardless of the NET2272
2167	 *    silicon revision.  The legacy register applies to NET2270
2168	 *    firmware being applied to the NET2272.
2169	 */
2170	val = net2272_read(dev, CHIPREV_LEGACY);
2171	if (val != NET2270_LEGACY_REV) {
2172		/*
2173		 * Unexpected legacy revision value
2174		 * - Perhaps the chip is a NET2270?
2175		 */
2176		dev_dbg(dev->dev,
2177			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
2178			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
2179			__func__, NET2270_LEGACY_REV, val);
2180		return -EINVAL;
2181	}
2182
2183	/*
2184	 * Verify NET2272 silicon revision
2185	 *  - This revision register is appropriate for the silicon version
2186	 *    of the NET2272
2187	 */
2188	val = net2272_read(dev, CHIPREV_2272);
2189	switch (val) {
2190	case CHIPREV_NET2272_R1:
2191		/*
2192		 * NET2272 Rev 1 has DMA related errata:
2193		 *  - Newer silicon (Rev 1A or better) required
2194		 */
2195		dev_dbg(dev->dev,
2196			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
2197			__func__);
2198		break;
2199	case CHIPREV_NET2272_R1A:
2200		break;
2201	default:
2202		/* NET2272 silicon version *may* not work with this firmware */
2203		dev_dbg(dev->dev,
2204			"%s: unexpected silicon revision register value: "
2205			" CHIPREV_2272: 0x%2.2x\n",
2206			__func__, val);
2207		/*
2208		 * Return Success, even though the chip rev is not an expected value
2209		 *  - Older, pre-built firmware can attempt to operate on newer silicon
2210		 *  - Often, new silicon is perfectly compatible
2211		 */
2212	}
2213
2214	/* Success: NET2272 checks out OK */
2215	return 0;
2216}
2217
2218static void
2219net2272_gadget_release(struct device *_dev)
2220{
2221	struct net2272 *dev = dev_get_drvdata(_dev);
2222	kfree(dev);
2223}
2224
2225/*---------------------------------------------------------------------------*/
2226
2227static void
2228net2272_remove(struct net2272 *dev)
2229{
2230	usb_del_gadget_udc(&dev->gadget);
2231	free_irq(dev->irq, dev);
2232	iounmap(dev->base_addr);
2233	device_remove_file(dev->dev, &dev_attr_registers);
2234
2235	dev_info(dev->dev, "unbind\n");
2236}
2237
2238static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq)
2239{
2240	struct net2272 *ret;
2241
2242	if (!irq) {
2243		dev_dbg(dev, "No IRQ!\n");
2244		return ERR_PTR(-ENODEV);
2245	}
2246
2247	/* alloc, and start init */
2248	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
2249	if (!ret)
2250		return ERR_PTR(-ENOMEM);
2251
2252	spin_lock_init(&ret->lock);
2253	ret->irq = irq;
2254	ret->dev = dev;
2255	ret->gadget.ops = &net2272_ops;
2256	ret->gadget.max_speed = USB_SPEED_HIGH;
2257
2258	/* the "gadget" abstracts/virtualizes the controller */
2259	ret->gadget.name = driver_name;
2260
2261	return ret;
2262}
2263
2264static int
2265net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
2266{
2267	int ret;
2268
2269	/* See if there... */
2270	if (net2272_present(dev)) {
2271		dev_warn(dev->dev, "2272 not found!\n");
2272		ret = -ENODEV;
2273		goto err;
2274	}
2275
2276	net2272_usb_reset(dev);
2277	net2272_usb_reinit(dev);
2278
2279	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
2280	if (ret) {
2281		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
2282		goto err;
2283	}
2284
2285	dev->chiprev = net2272_read(dev, CHIPREV_2272);
2286
2287	/* done */
2288	dev_info(dev->dev, "%s\n", driver_desc);
2289	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
2290		dev->irq, dev->base_addr, dev->chiprev,
2291		dma_mode_string());
2292	dev_info(dev->dev, "version: %s\n", driver_vers);
2293
2294	ret = device_create_file(dev->dev, &dev_attr_registers);
2295	if (ret)
2296		goto err_irq;
2297
2298	ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget,
2299			net2272_gadget_release);
2300	if (ret)
2301		goto err_add_udc;
2302
2303	return 0;
2304
2305err_add_udc:
2306	device_remove_file(dev->dev, &dev_attr_registers);
2307 err_irq:
2308	free_irq(dev->irq, dev);
2309 err:
2310	return ret;
2311}
2312
2313#ifdef CONFIG_PCI
2314
2315/*
2316 * wrap this driver around the specified device, but
2317 * don't respond over USB until a gadget driver binds to us
2318 */
2319
2320static int
2321net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
2322{
2323	unsigned long resource, len, tmp;
2324	void __iomem *mem_mapped_addr[4];
2325	int ret, i;
2326
2327	/*
2328	 * BAR 0 holds PLX 9054 config registers
2329	 * BAR 1 is i/o memory; unused here
2330	 * BAR 2 holds EPLD config registers
2331	 * BAR 3 holds NET2272 registers
2332	 */
2333
2334	/* Find and map all address spaces */
2335	for (i = 0; i < 4; ++i) {
2336		if (i == 1)
2337			continue;	/* BAR1 unused */
2338
2339		resource = pci_resource_start(pdev, i);
2340		len = pci_resource_len(pdev, i);
2341
2342		if (!request_mem_region(resource, len, driver_name)) {
2343			dev_dbg(dev->dev, "controller already in use\n");
2344			ret = -EBUSY;
2345			goto err;
2346		}
2347
2348		mem_mapped_addr[i] = ioremap_nocache(resource, len);
2349		if (mem_mapped_addr[i] == NULL) {
2350			release_mem_region(resource, len);
2351			dev_dbg(dev->dev, "can't map memory\n");
2352			ret = -EFAULT;
2353			goto err;
2354		}
2355	}
2356
2357	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
2358	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
2359	dev->base_addr = mem_mapped_addr[3];
2360
2361	/* Set PLX 9054 bus width (16 bits) */
2362	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
2363	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
2364			dev->rdk1.plx9054_base_addr + LBRD1);
2365
2366	/* Enable PLX 9054 Interrupts */
2367	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
2368			(1 << PCI_INTERRUPT_ENABLE) |
2369			(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
2370			dev->rdk1.plx9054_base_addr + INTCSR);
2371
2372	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
2373			dev->rdk1.plx9054_base_addr + DMACSR0);
2374
2375	/* reset */
2376	writeb((1 << EPLD_DMA_ENABLE) |
2377		(1 << DMA_CTL_DACK) |
2378		(1 << DMA_TIMEOUT_ENABLE) |
2379		(1 << USER) |
2380		(0 << MPX_MODE) |
2381		(1 << BUSWIDTH) |
2382		(1 << NET2272_RESET),
2383		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2384
2385	mb();
2386	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
2387		~(1 << NET2272_RESET),
2388		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
2389	udelay(200);
2390
2391	return 0;
2392
2393 err:
2394	while (--i >= 0) {
2395		iounmap(mem_mapped_addr[i]);
2396		release_mem_region(pci_resource_start(pdev, i),
2397			pci_resource_len(pdev, i));
2398	}
2399
2400	return ret;
2401}
2402
2403static int
2404net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
2405{
2406	unsigned long resource, len;
2407	void __iomem *mem_mapped_addr[2];
2408	int ret, i;
2409
2410	/*
2411	 * BAR 0 holds FGPA config registers
2412	 * BAR 1 holds NET2272 registers
2413	 */
2414
2415	/* Find and map all address spaces, bar2-3 unused in rdk 2 */
2416	for (i = 0; i < 2; ++i) {
2417		resource = pci_resource_start(pdev, i);
2418		len = pci_resource_len(pdev, i);
2419
2420		if (!request_mem_region(resource, len, driver_name)) {
2421			dev_dbg(dev->dev, "controller already in use\n");
2422			ret = -EBUSY;
2423			goto err;
2424		}
2425
2426		mem_mapped_addr[i] = ioremap_nocache(resource, len);
2427		if (mem_mapped_addr[i] == NULL) {
2428			release_mem_region(resource, len);
2429			dev_dbg(dev->dev, "can't map memory\n");
2430			ret = -EFAULT;
2431			goto err;
2432		}
2433	}
2434
2435	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
2436	dev->base_addr = mem_mapped_addr[1];
2437
2438	mb();
2439	/* Set 2272 bus width (16 bits) and reset */
2440	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2441	udelay(200);
2442	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
2443	/* Print fpga version number */
2444	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
2445		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
2446	/* Enable FPGA Interrupts */
2447	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);
2448
2449	return 0;
2450
2451 err:
2452	while (--i >= 0) {
2453		iounmap(mem_mapped_addr[i]);
2454		release_mem_region(pci_resource_start(pdev, i),
2455			pci_resource_len(pdev, i));
2456	}
2457
2458	return ret;
2459}
2460
2461static int
2462net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2463{
2464	struct net2272 *dev;
2465	int ret;
2466
2467	dev = net2272_probe_init(&pdev->dev, pdev->irq);
2468	if (IS_ERR(dev))
2469		return PTR_ERR(dev);
2470	dev->dev_id = pdev->device;
2471
2472	if (pci_enable_device(pdev) < 0) {
2473		ret = -ENODEV;
2474		goto err_free;
2475	}
2476
2477	pci_set_master(pdev);
2478
2479	switch (pdev->device) {
2480	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
2481	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
2482	default: BUG();
2483	}
2484	if (ret)
2485		goto err_pci;
2486
2487	ret = net2272_probe_fin(dev, 0);
2488	if (ret)
2489		goto err_pci;
2490
2491	pci_set_drvdata(pdev, dev);
2492
2493	return 0;
2494
2495 err_pci:
2496	pci_disable_device(pdev);
2497 err_free:
2498	kfree(dev);
2499
2500	return ret;
2501}
2502
2503static void
2504net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
2505{
2506	int i;
2507
2508	/* disable PLX 9054 interrupts */
2509	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2510		~(1 << PCI_INTERRUPT_ENABLE),
2511		dev->rdk1.plx9054_base_addr + INTCSR);
2512
2513	/* clean up resources allocated during probe() */
2514	iounmap(dev->rdk1.plx9054_base_addr);
2515	iounmap(dev->rdk1.epld_base_addr);
2516
2517	for (i = 0; i < 4; ++i) {
2518		if (i == 1)
2519			continue;	/* BAR1 unused */
2520		release_mem_region(pci_resource_start(pdev, i),
2521			pci_resource_len(pdev, i));
2522	}
2523}
2524
2525static void
2526net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
2527{
2528	int i;
2529
2530	/* disable fpga interrupts
2531	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
2532			~(1 << PCI_INTERRUPT_ENABLE),
2533			dev->rdk1.plx9054_base_addr + INTCSR);
2534	*/
2535
2536	/* clean up resources allocated during probe() */
2537	iounmap(dev->rdk2.fpga_base_addr);
2538
2539	for (i = 0; i < 2; ++i)
2540		release_mem_region(pci_resource_start(pdev, i),
2541			pci_resource_len(pdev, i));
2542}
2543
2544static void
2545net2272_pci_remove(struct pci_dev *pdev)
2546{
2547	struct net2272 *dev = pci_get_drvdata(pdev);
2548
2549	net2272_remove(dev);
2550
2551	switch (pdev->device) {
2552	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
2553	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
2554	default: BUG();
2555	}
2556
2557	pci_disable_device(pdev);
2558
2559	kfree(dev);
2560}
2561
2562/* Table of matching PCI IDs */
2563static struct pci_device_id pci_ids[] = {
2564	{	/* RDK 1 card */
2565		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2566		.class_mask  = 0,
2567		.vendor      = PCI_VENDOR_ID_PLX,
2568		.device      = PCI_DEVICE_ID_RDK1,
2569		.subvendor   = PCI_ANY_ID,
2570		.subdevice   = PCI_ANY_ID,
2571	},
2572	{	/* RDK 2 card */
2573		.class       = ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
2574		.class_mask  = 0,
2575		.vendor      = PCI_VENDOR_ID_PLX,
2576		.device      = PCI_DEVICE_ID_RDK2,
2577		.subvendor   = PCI_ANY_ID,
2578		.subdevice   = PCI_ANY_ID,
2579	},
2580	{ }
2581};
2582MODULE_DEVICE_TABLE(pci, pci_ids);
2583
2584static struct pci_driver net2272_pci_driver = {
2585	.name     = driver_name,
2586	.id_table = pci_ids,
2587
2588	.probe    = net2272_pci_probe,
2589	.remove   = net2272_pci_remove,
2590};
2591
2592static int net2272_pci_register(void)
2593{
2594	return pci_register_driver(&net2272_pci_driver);
2595}
2596
2597static void net2272_pci_unregister(void)
2598{
2599	pci_unregister_driver(&net2272_pci_driver);
2600}
2601
2602#else
2603static inline int net2272_pci_register(void) { return 0; }
2604static inline void net2272_pci_unregister(void) { }
2605#endif
2606
2607/*---------------------------------------------------------------------------*/
2608
2609static int
2610net2272_plat_probe(struct platform_device *pdev)
2611{
2612	struct net2272 *dev;
2613	int ret;
2614	unsigned int irqflags;
2615	resource_size_t base, len;
2616	struct resource *iomem, *iomem_bus, *irq_res;
2617
2618	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2619	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2620	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
2621	if (!irq_res || !iomem) {
2622		dev_err(&pdev->dev, "must provide irq/base addr");
2623		return -EINVAL;
2624	}
2625
2626	dev = net2272_probe_init(&pdev->dev, irq_res->start);
2627	if (IS_ERR(dev))
2628		return PTR_ERR(dev);
2629
2630	irqflags = 0;
2631	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
2632		irqflags |= IRQF_TRIGGER_RISING;
2633	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
2634		irqflags |= IRQF_TRIGGER_FALLING;
2635	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
2636		irqflags |= IRQF_TRIGGER_HIGH;
2637	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
2638		irqflags |= IRQF_TRIGGER_LOW;
2639
2640	base = iomem->start;
2641	len = resource_size(iomem);
2642	if (iomem_bus)
2643		dev->base_shift = iomem_bus->start;
2644
2645	if (!request_mem_region(base, len, driver_name)) {
2646		dev_dbg(dev->dev, "get request memory region!\n");
2647		ret = -EBUSY;
2648		goto err;
2649	}
2650	dev->base_addr = ioremap_nocache(base, len);
2651	if (!dev->base_addr) {
2652		dev_dbg(dev->dev, "can't map memory\n");
2653		ret = -EFAULT;
2654		goto err_req;
2655	}
2656
2657	ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
2658	if (ret)
2659		goto err_io;
2660
2661	platform_set_drvdata(pdev, dev);
2662	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
2663		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");
2664
2665	return 0;
2666
2667 err_io:
2668	iounmap(dev->base_addr);
2669 err_req:
2670	release_mem_region(base, len);
2671 err:
2672	return ret;
2673}
2674
2675static int
2676net2272_plat_remove(struct platform_device *pdev)
2677{
2678	struct net2272 *dev = platform_get_drvdata(pdev);
2679
2680	net2272_remove(dev);
2681
2682	release_mem_region(pdev->resource[0].start,
2683		resource_size(&pdev->resource[0]));
2684
2685	kfree(dev);
2686
2687	return 0;
2688}
2689
2690static struct platform_driver net2272_plat_driver = {
2691	.probe   = net2272_plat_probe,
2692	.remove  = net2272_plat_remove,
2693	.driver  = {
2694		.name  = driver_name,
2695	},
2696	/* FIXME .suspend, .resume */
2697};
2698MODULE_ALIAS("platform:net2272");
2699
2700static int __init net2272_init(void)
2701{
2702	int ret;
2703
2704	ret = net2272_pci_register();
2705	if (ret)
2706		return ret;
2707	ret = platform_driver_register(&net2272_plat_driver);
2708	if (ret)
2709		goto err_pci;
2710	return ret;
2711
2712err_pci:
2713	net2272_pci_unregister();
2714	return ret;
2715}
2716module_init(net2272_init);
2717
2718static void __exit net2272_cleanup(void)
2719{
2720	net2272_pci_unregister();
2721	platform_driver_unregister(&net2272_plat_driver);
2722}
2723module_exit(net2272_cleanup);
2724
2725MODULE_DESCRIPTION(DRIVER_DESC);
2726MODULE_AUTHOR("PLX Technology, Inc.");
2727MODULE_LICENSE("GPL");
2728