root/drivers/usb/host/imx21-hcd.c

DEFINITIONS

This source file includes the following definitions.
  1. hcd_to_imx21
  2. set_register_bits
  3. clear_register_bits
  4. clear_toggle_bit
  5. set_toggle_bit
  6. etd_writel
  7. etd_readl
  8. wrap_frame
  9. frame_after
  10. imx21_hc_get_frame
  11. unsuitable_for_dma
  12. alloc_etd
  13. disactivate_etd
  14. reset_etd
  15. free_etd
  16. setup_etd_dword0
  17. copy_to_dmem
  18. activate_etd
  19. alloc_dmem
  20. activate_queued_etd
  21. free_dmem
  22. free_epdmem
  23. ep_idle
  24. urb_done
  25. nonisoc_urb_completed_for_etd
  26. schedule_isoc_etds
  27. isoc_etd_done
  28. alloc_isoc_ep
  29. alloc_isoc_etds
  30. imx21_hc_urb_enqueue_isoc
  31. dequeue_isoc_urb
  32. schedule_nonisoc_etd
  33. nonisoc_etd_done
  34. alloc_ep
  35. imx21_hc_urb_enqueue
  36. imx21_hc_urb_dequeue
  37. process_etds
  38. imx21_irq
  39. imx21_hc_endpoint_disable
  40. get_hub_descriptor
  41. imx21_hc_hub_status_data
  42. imx21_hc_hub_control
  43. imx21_hc_reset
  44. imx21_hc_start
  45. imx21_hc_stop
  46. imx21_remove
  47. imx21_probe

   1 // SPDX-License-Identifier: GPL-2.0+
   2 /*
   3  * USB Host Controller Driver for IMX21
   4  *
   5  * Copyright (C) 2006 Loping Dog Embedded Systems
   6  * Copyright (C) 2009 Martin Fuzzey
   7  * Originally written by Jay Monkman <jtm@lopingdog.com>
   8  * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
   9  */
  10 
  11 
  12  /*
  13   * The i.MX21 USB hardware contains
  14   *    * 32 transfer descriptors (called ETDs)
  15   *    * 4Kb of Data memory
  16   *
  17   * The data memory is shared between the host and function controllers
  18   * (but this driver only supports the host controller)
  19   *
  20   * So setting up a transfer involves:
  21  *    * Allocating an ETD
  22  *    * Filling in the ETD with appropriate information
  23  *    * Allocating data memory (and putting the offset in the ETD)
  24  *    * Activating the ETD
  25  *    * Getting an interrupt when done.
  26   *
  27   * An ETD is assigned to each active endpoint.
  28   *
  29  * Low resource (ETD and Data memory) situations are handled differently for
  30  * isochronous and non-isochronous transactions:
  31   *
  32   * Non ISOC transfers are queued if either ETDs or Data memory are unavailable
  33   *
  34   * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
  35   * They allocate both ETDs and Data memory during URB submission
  36   * (and fail if unavailable).
  37   */
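
A minimal, self-contained model of the resource scheme described above: 32
ETD slots plus a shared 4 KiB data memory. All names below are illustrative
stand-ins, not the driver's real structures; it sketches only the
bookkeeping, with a trivial bump allocator in place of the driver's
first-fit dmem list.

#include <stdio.h>

#define NUM_ETD  32
#define DMEM_SZ  4096

static int etd_alloc[NUM_ETD];          /* 1 = ETD slot in use */
static unsigned int dmem_next;          /* bump allocator watermark */

/* Claim a free transfer descriptor slot; -1 if all 32 are busy. */
static int model_alloc_etd(void)
{
        for (int i = 0; i < NUM_ETD; i++) {
                if (!etd_alloc[i]) {
                        etd_alloc[i] = 1;
                        return i;
                }
        }
        return -1;
}

/* Reserve 'size' bytes of data memory, rounded to a 4-byte multiple. */
static int model_alloc_dmem(unsigned int size)
{
        unsigned int off = dmem_next;

        size = (size + 3) & ~3u;
        if (off + size > DMEM_SZ)
                return -1;
        dmem_next += size;
        return (int)off;
}

int main(void)
{
        int etd = model_alloc_etd();
        int off = model_alloc_dmem(64); /* e.g. one max-size packet */

        printf("etd=%d dmem offset=%d\n", etd, off);
        return 0;
}
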
  38 
  39 #include <linux/clk.h>
  40 #include <linux/io.h>
  41 #include <linux/kernel.h>
  42 #include <linux/list.h>
  43 #include <linux/platform_device.h>
  44 #include <linux/slab.h>
  45 #include <linux/usb.h>
  46 #include <linux/usb/hcd.h>
  47 #include <linux/dma-mapping.h>
  48 #include <linux/module.h>
  49 
  50 #include "imx21-hcd.h"
  51 
  52 #ifdef CONFIG_DYNAMIC_DEBUG
  53 #define DEBUG
  54 #endif
  55 
  56 #ifdef DEBUG
  57 #define DEBUG_LOG_FRAME(imx21, etd, event) \
  58         (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
  59 #else
  60 #define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
  61 #endif
  62 
  63 static const char hcd_name[] = "imx21-hcd";
  64 
  65 static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
  66 {
  67         return (struct imx21 *)hcd->hcd_priv;
  68 }
  69 
  70 
  71 /* =========================================== */
  72 /* Hardware access helpers                      */
  73 /* =========================================== */
  74 
  75 static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
  76 {
  77         void __iomem *reg = imx21->regs + offset;
  78         writel(readl(reg) | mask, reg);
  79 }
  80 
  81 static inline void clear_register_bits(struct imx21 *imx21,
  82         u32 offset, u32 mask)
  83 {
  84         void __iomem *reg = imx21->regs + offset;
  85         writel(readl(reg) & ~mask, reg);
  86 }
  87 
  88 static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
  89 {
  90         void __iomem *reg = imx21->regs + offset;
  91 
  92         if (readl(reg) & mask)
  93                 writel(mask, reg);
  94 }
  95 
  96 static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
  97 {
  98         void __iomem *reg = imx21->regs + offset;
  99 
 100         if (!(readl(reg) & mask))
 101                 writel(mask, reg);
 102 }
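
The two *_toggle_bit helpers read the register before writing, which
suggests these status registers flip a bit whenever a 1 is written to its
position, so a blind write could set a bit that was meant to be cleared.
A stand-alone model of that presumed behaviour, with hypothetical names:

#include <stdio.h>
#include <stdint.h>

static uint32_t toggle_reg;     /* modelled write-1-to-toggle register */

static uint32_t reg_read(void)           { return toggle_reg; }
static void     reg_write(uint32_t mask) { toggle_reg ^= mask; }

/* Mirrors clear_toggle_bit(): only write when the bit is currently set,
 * otherwise the write would flip the bit on instead of clearing it. */
static void model_clear_toggle_bit(uint32_t mask)
{
        if (reg_read() & mask)
                reg_write(mask);
}

int main(void)
{
        toggle_reg = 0x4;
        model_clear_toggle_bit(0x4);            /* toggles bit 2 off */
        model_clear_toggle_bit(0x4);            /* no-op: already clear */
        printf("reg=0x%x\n", toggle_reg);       /* reg=0x0 */
        return 0;
}
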
 103 
 104 static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
 105 {
 106         writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
 107 }
 108 
 109 static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
 110 {
 111         return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
 112 }
 113 
 114 static inline int wrap_frame(int counter)
 115 {
 116         return counter & 0xFFFF;
 117 }
 118 
 119 static inline int frame_after(int frame, int after)
 120 {
 121         /* handle wrapping like jiffies time_after() */
 122         return (s16)((s16)after - (s16)frame) < 0;
 123 }
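
frame_after() uses the same trick as the kernel's time_after(): the frame
counter is only 16 bits wide, so performing the subtraction in a signed
16-bit type gives the right answer across the 0xFFFF wrap. A quick
stand-alone check of the two helpers above:

#include <stdio.h>
#include <stdint.h>

static int wrap_frame(int counter) { return counter & 0xFFFF; }

static int frame_after(int frame, int after)
{
        return (int16_t)((int16_t)after - (int16_t)frame) < 0;
}

int main(void)
{
        /* 0x0005 is "after" 0xFFF0 once the 16-bit counter has wrapped */
        printf("%d\n", frame_after(wrap_frame(0xFFF0 + 0x15), 0xFFF0)); /* 1 */
        printf("%d\n", frame_after(0x0010, 0x0020));                    /* 0 */
        return 0;
}
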
 124 
 125 static int imx21_hc_get_frame(struct usb_hcd *hcd)
 126 {
 127         struct imx21 *imx21 = hcd_to_imx21(hcd);
 128 
 129         return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
 130 }
 131 
 132 static inline bool unsuitable_for_dma(dma_addr_t addr)
 133 {
 134         return (addr & 3) != 0;
 135 }
 136 
 137 #include "imx21-dbg.c"
 138 
 139 static void nonisoc_urb_completed_for_etd(
 140         struct imx21 *imx21, struct etd_priv *etd, int status);
 141 static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
 142 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
 143 
 144 /* =========================================== */
 145 /* ETD management                               */
 146 /* ===========================================  */
 147 
 148 static int alloc_etd(struct imx21 *imx21)
 149 {
 150         int i;
 151         struct etd_priv *etd = imx21->etd;
 152 
 153         for (i = 0; i < USB_NUM_ETD; i++, etd++) {
 154                 if (etd->alloc == 0) {
 155                         memset(etd, 0, sizeof(imx21->etd[0]));
 156                         etd->alloc = 1;
 157                         debug_etd_allocated(imx21);
 158                         return i;
 159                 }
 160         }
 161         return -1;
 162 }
 163 
 164 static void disactivate_etd(struct imx21 *imx21, int num)
 165 {
 166         int etd_mask = (1 << num);
 167         struct etd_priv *etd = &imx21->etd[num];
 168 
 169         writel(etd_mask, imx21->regs + USBH_ETDENCLR);
 170         clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
 171         writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
 172         clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
 173 
 174         etd->active_count = 0;
 175 
 176         DEBUG_LOG_FRAME(imx21, etd, disactivated);
 177 }
 178 
 179 static void reset_etd(struct imx21 *imx21, int num)
 180 {
 181         struct etd_priv *etd = imx21->etd + num;
 182         int i;
 183 
 184         disactivate_etd(imx21, num);
 185 
 186         for (i = 0; i < 4; i++)
 187                 etd_writel(imx21, num, i, 0);
 188         etd->urb = NULL;
 189         etd->ep = NULL;
 190         etd->td = NULL;
 191         etd->bounce_buffer = NULL;
 192 }
 193 
 194 static void free_etd(struct imx21 *imx21, int num)
 195 {
 196         if (num < 0)
 197                 return;
 198 
 199         if (num >= USB_NUM_ETD) {
 200                 dev_err(imx21->dev, "BAD etd=%d!\n", num);
 201                 return;
 202         }
 203         if (imx21->etd[num].alloc == 0) {
 204                 dev_err(imx21->dev, "ETD %d already free!\n", num);
 205                 return;
 206         }
 207 
 208         debug_etd_freed(imx21);
 209         reset_etd(imx21, num);
 210         memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
 211 }
 212 
 213 
 214 static void setup_etd_dword0(struct imx21 *imx21,
 215         int etd_num, struct urb *urb,  u8 dir, u16 maxpacket)
 216 {
 217         etd_writel(imx21, etd_num, 0,
 218                 ((u32) usb_pipedevice(urb->pipe)) <<  DW0_ADDRESS |
 219                 ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
 220                 ((u32) dir << DW0_DIRECT) |
 221                 ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
 222                         1 : 0) << DW0_SPEED) |
 223                 ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
 224                 ((u32) maxpacket << DW0_MAXPKTSIZ));
 225 }
 226 
 227 /*
 228  * Copy a buffer to the data controller's data memory.
 229  * We cannot use memcpy_toio() because the hardware requires 32-bit writes.
 230  */
 231 static void copy_to_dmem(
 232         struct imx21 *imx21, int dmem_offset, void *src, int count)
 233 {
 234         void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
 235         u32 word = 0;
 236         u8 *p = src;
 237         int byte = 0;
 238         int i;
 239 
 240         for (i = 0; i < count; i++) {
 241                 byte = i % 4;
 242                 word += (*p++ << (byte * 8));
 243                 if (byte == 3) {
 244                         writel(word, dmem);
 245                         dmem += 4;
 246                         word = 0;
 247                 }
 248         }
 249 
 250         if (count && byte != 3)
 251                 writel(word, dmem);
 252 }
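
The same packing logic as copy_to_dmem(), but writing to an ordinary buffer
instead of MMIO so it can be run and inspected (a sketch with stand-in
names; note how the trailing partial word is flushed after the loop):

#include <stdio.h>
#include <stdint.h>

/* Pack 'count' bytes into little-endian 32-bit words, mirroring the loop
 * in copy_to_dmem() but targeting ordinary memory. */
static void pack_words(uint32_t *dst, const uint8_t *src, int count)
{
        uint32_t word = 0;
        int byte = 0;

        for (int i = 0; i < count; i++) {
                byte = i % 4;
                word += (uint32_t)src[i] << (byte * 8);
                if (byte == 3) {
                        *dst++ = word;
                        word = 0;
                }
        }
        if (count && byte != 3)
                *dst = word;            /* trailing partial word */
}

int main(void)
{
        uint32_t out[2] = { 0, 0 };
        const uint8_t in[6] = { 1, 2, 3, 4, 5, 6 };

        pack_words(out, in, 6);
        printf("0x%08x 0x%08x\n", out[0], out[1]); /* 0x04030201 0x00000605 */
        return 0;
}
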
 253 
 254 static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
 255 {
 256         u32 etd_mask = 1 << etd_num;
 257         struct etd_priv *etd = &imx21->etd[etd_num];
 258 
 259         if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
 260                 /* For non-aligned isoc the condition below is always true */
 261                 if (etd->len <= etd->dmem_size) {
 262                         /* Fits into data memory, use PIO */
 263                         if (dir != TD_DIR_IN) {
 264                                 copy_to_dmem(imx21,
 265                                                 etd->dmem_offset,
 266                                                 etd->cpu_buffer, etd->len);
 267                         }
 268                         etd->dma_handle = 0;
 269 
 270                 } else {
 271                         /* Too big for data memory, use bounce buffer */
 272                         enum dma_data_direction dmadir;
 273 
 274                         if (dir == TD_DIR_IN) {
 275                                 dmadir = DMA_FROM_DEVICE;
 276                                 etd->bounce_buffer = kmalloc(etd->len,
 277                                                                 GFP_ATOMIC);
 278                         } else {
 279                                 dmadir = DMA_TO_DEVICE;
 280                                 etd->bounce_buffer = kmemdup(etd->cpu_buffer,
 281                                                                 etd->len,
 282                                                                 GFP_ATOMIC);
 283                         }
 284                         if (!etd->bounce_buffer) {
 285                                 dev_err(imx21->dev, "failed bounce alloc\n");
 286                                 goto err_bounce_alloc;
 287                         }
 288 
 289                         etd->dma_handle =
 290                                 dma_map_single(imx21->dev,
 291                                                 etd->bounce_buffer,
 292                                                 etd->len,
 293                                                 dmadir);
 294                         if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
 295                                 dev_err(imx21->dev, "failed bounce map\n");
 296                                 goto err_bounce_map;
 297                         }
 298                 }
 299         }
 300 
 301         clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
 302         set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
 303         clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 304         clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
 305 
 306         if (etd->dma_handle) {
 307                 set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
 308                 clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
 309                 clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
 310                 writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
 311                 set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
 312         } else {
 313                 if (dir != TD_DIR_IN) {
 314                         /* need to set for ZLP and PIO */
 315                         set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 316                         set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
 317                 }
 318         }
 319 
 320         DEBUG_LOG_FRAME(imx21, etd, activated);
 321 
 322 #ifdef DEBUG
 323         if (!etd->active_count) {
 324                 int i;
 325                 etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
 326                 etd->disactivated_frame = -1;
 327                 etd->last_int_frame = -1;
 328                 etd->last_req_frame = -1;
 329 
 330                 for (i = 0; i < 4; i++)
 331                         etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
 332         }
 333 #endif
 334 
 335         etd->active_count = 1;
 336         writel(etd_mask, imx21->regs + USBH_ETDENSET);
 337         return;
 338 
 339 err_bounce_map:
 340         kfree(etd->bounce_buffer);
 341 
 342 err_bounce_alloc:
 343         free_dmem(imx21, etd);
 344         nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
 345 }
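
activate_etd() picks one of three transfer strategies when the buffer's DMA
address is not 32-bit aligned; the decision reduces to this stand-alone
sketch (pick_strategy is a hypothetical helper, not driver code):

#include <stdio.h>
#include <stdint.h>

/* The strategy chosen in activate_etd() for a given buffer. */
static const char *pick_strategy(uintptr_t dma_addr, int len, int dmem_size)
{
        if ((dma_addr & 3) == 0)
                return "direct DMA";
        if (len <= dmem_size)
                return "PIO via data memory";
        return "DMA via bounce buffer";
}

int main(void)
{
        printf("%s\n", pick_strategy(0x1000, 512, 128)); /* direct DMA */
        printf("%s\n", pick_strategy(0x1001, 64, 128));  /* PIO */
        printf("%s\n", pick_strategy(0x1001, 512, 128)); /* bounce buffer */
        return 0;
}
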
 346 
 347 /* ===========================================  */
 348 /* Data memory management                       */
 349 /* ===========================================  */
 350 
 351 static int alloc_dmem(struct imx21 *imx21, unsigned int size,
 352                       struct usb_host_endpoint *ep)
 353 {
 354         unsigned int offset = 0;
 355         struct imx21_dmem_area *area;
 356         struct imx21_dmem_area *tmp;
 357 
 358         size += (~size + 1) & 0x3; /* Round up to a 4-byte multiple */
 359 
 360         if (size > DMEM_SIZE) {
 361                 dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
 362                         size, DMEM_SIZE);
 363                 return -EINVAL;
 364         }
 365 
 366         list_for_each_entry(tmp, &imx21->dmem_list, list) {
 367                 if ((size + offset) < offset)
 368                         goto fail;
 369                 if ((size + offset) <= tmp->offset)
 370                         break;
 371                 offset = tmp->size + tmp->offset;
 372                 if ((offset + size) > DMEM_SIZE)
 373                         goto fail;
 374         }
 375 
 376         area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
 377         if (area == NULL)
 378                 return -ENOMEM;
 379 
 380         area->ep = ep;
 381         area->offset = offset;
 382         area->size = size;
 383         list_add_tail(&area->list, &tmp->list);
 384         debug_dmem_allocated(imx21, size);
 385         return offset;
 386 
 387 fail:
 388         return -ENOMEM;
 389 }
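
The rounding line at the top adds the two's-complement remainder: for
unsigned size, (~size + 1) & 0x3 equals the more familiar
(4 - size % 4) % 4. The rest of the function is a first-fit walk over the
offset-sorted dmem_list, inserting the new area into the first gap that
fits. The rounding identity can be checked in isolation:

#include <stdio.h>

int main(void)
{
        for (unsigned int size = 0; size < 8; size++) {
                unsigned int pad = (~size + 1) & 0x3;

                /* pad cycles 0,3,2,1 so size + pad is a multiple of 4 */
                printf("size=%u pad=%u rounded=%u\n", size, pad, size + pad);
        }
        return 0;
}
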
 390 
 391 /* Memory now available for a queued ETD - activate it */
 392 static void activate_queued_etd(struct imx21 *imx21,
 393         struct etd_priv *etd, u32 dmem_offset)
 394 {
 395         struct urb_priv *urb_priv = etd->urb->hcpriv;
 396         int etd_num = etd - &imx21->etd[0];
 397         u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
 398         u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
 399 
 400         dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
 401                 etd_num);
 402         etd_writel(imx21, etd_num, 1,
 403             ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
 404 
 405         etd->dmem_offset = dmem_offset;
 406         urb_priv->active = 1;
 407         activate_etd(imx21, etd_num, dir);
 408 }
 409 
 410 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
 411 {
 412         struct imx21_dmem_area *area;
 413         struct etd_priv *tmp;
 414         int found = 0;
 415         int offset;
 416 
 417         if (!etd->dmem_size)
 418                 return;
 419         etd->dmem_size = 0;
 420 
 421         offset = etd->dmem_offset;
 422         list_for_each_entry(area, &imx21->dmem_list, list) {
 423                 if (area->offset == offset) {
 424                         debug_dmem_freed(imx21, area->size);
 425                         list_del(&area->list);
 426                         kfree(area);
 427                         found = 1;
 428                         break;
 429                 }
 430         }
 431 
 432         if (!found)  {
 433                 dev_err(imx21->dev,
 434                         "Trying to free unallocated DMEM %d\n", offset);
 435                 return;
 436         }
 437 
 438         /* Try again to allocate memory for anything we've queued */
 439         list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
 440                 offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
 441                 if (offset >= 0) {
 442                         list_del(&etd->queue);
 443                         activate_queued_etd(imx21, etd, (u32)offset);
 444                 }
 445         }
 446 }
 447 
 448 static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
 449 {
 450         struct imx21_dmem_area *area, *tmp;
 451 
 452         list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
 453                 if (area->ep == ep) {
 454                         dev_err(imx21->dev,
 455                                 "Active DMEM %d for disabled ep=%p\n",
 456                                 area->offset, ep);
 457                         list_del(&area->list);
 458                         kfree(area);
 459                 }
 460         }
 461 }
 462 
 463 
 464 /* ===========================================  */
 465 /* Endpoint handling                            */
 466 /* ===========================================  */
 467 
 468 /* Endpoint now idle - release its ETD(s) or assign to queued request */
 469 static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
 470 {
 471         int i;
 472 
 473         for (i = 0; i < NUM_ISO_ETDS; i++) {
 474                 int etd_num = ep_priv->etd[i];
 475                 struct etd_priv *etd;
 476                 if (etd_num < 0)
 477                         continue;
 478 
 479                 etd = &imx21->etd[etd_num];
 480                 ep_priv->etd[i] = -1;
 481 
 482                 free_dmem(imx21, etd); /* for isoc */
 483 
 484                 if (list_empty(&imx21->queue_for_etd)) {
 485                         free_etd(imx21, etd_num);
 486                         continue;
 487                 }
 488 
 489                 dev_dbg(imx21->dev,
 490                         "assigning idle etd %d for queued request\n", etd_num);
 491                 ep_priv = list_first_entry(&imx21->queue_for_etd,
 492                         struct ep_priv, queue);
 493                 list_del(&ep_priv->queue);
 494                 reset_etd(imx21, etd_num);
 495                 ep_priv->waiting_etd = 0;
 496                 ep_priv->etd[i] = etd_num;
 497 
 498                 if (list_empty(&ep_priv->ep->urb_list)) {
 499                         dev_err(imx21->dev, "No urb for queued ep!\n");
 500                         continue;
 501                 }
 502                 schedule_nonisoc_etd(imx21, list_first_entry(
 503                         &ep_priv->ep->urb_list, struct urb, urb_list));
 504         }
 505 }
 506 
 507 static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
 508 __releases(imx21->lock)
 509 __acquires(imx21->lock)
 510 {
 511         struct imx21 *imx21 = hcd_to_imx21(hcd);
 512         struct ep_priv *ep_priv = urb->ep->hcpriv;
 513         struct urb_priv *urb_priv = urb->hcpriv;
 514 
 515         debug_urb_completed(imx21, urb, status);
 516         dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
 517 
 518         kfree(urb_priv->isoc_td);
 519         kfree(urb->hcpriv);
 520         urb->hcpriv = NULL;
 521         usb_hcd_unlink_urb_from_ep(hcd, urb);
 522         spin_unlock(&imx21->lock);
 523         usb_hcd_giveback_urb(hcd, urb, status);
 524         spin_lock(&imx21->lock);
 525         if (list_empty(&ep_priv->ep->urb_list))
 526                 ep_idle(imx21, ep_priv);
 527 }
 528 
 529 static void nonisoc_urb_completed_for_etd(
 530         struct imx21 *imx21, struct etd_priv *etd, int status)
 531 {
 532         struct usb_host_endpoint *ep = etd->ep;
 533 
 534         urb_done(imx21->hcd, etd->urb, status);
 535         etd->urb = NULL;
 536 
 537         if (!list_empty(&ep->urb_list)) {
 538                 struct urb *urb = list_first_entry(
 539                                         &ep->urb_list, struct urb, urb_list);
 540 
 541                 dev_vdbg(imx21->dev, "next URB %p\n", urb);
 542                 schedule_nonisoc_etd(imx21, urb);
 543         }
 544 }
 545 
 546 
 547 /* ===========================================  */
 548 /* ISOC Handling ...                            */
 549 /* ===========================================  */
 550 
 551 static void schedule_isoc_etds(struct usb_hcd *hcd,
 552         struct usb_host_endpoint *ep)
 553 {
 554         struct imx21 *imx21 = hcd_to_imx21(hcd);
 555         struct ep_priv *ep_priv = ep->hcpriv;
 556         struct etd_priv *etd;
 557         struct urb_priv *urb_priv;
 558         struct td *td;
 559         int etd_num;
 560         int i;
 561         int cur_frame;
 562         u8 dir;
 563 
 564         for (i = 0; i < NUM_ISO_ETDS; i++) {
 565 too_late:
 566                 if (list_empty(&ep_priv->td_list))
 567                         break;
 568 
 569                 etd_num = ep_priv->etd[i];
 570                 if (etd_num < 0)
 571                         break;
 572 
 573                 etd = &imx21->etd[etd_num];
 574                 if (etd->urb)
 575                         continue;
 576 
 577                 td = list_entry(ep_priv->td_list.next, struct td, list);
 578                 list_del(&td->list);
 579                 urb_priv = td->urb->hcpriv;
 580 
 581                 cur_frame = imx21_hc_get_frame(hcd);
 582                 if (frame_after(cur_frame, td->frame)) {
 583                         dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
 584                                 cur_frame, td->frame);
 585                         urb_priv->isoc_status = -EXDEV;
 586                         td->urb->iso_frame_desc[
 587                                 td->isoc_index].actual_length = 0;
 588                         td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
 589                         if (--urb_priv->isoc_remaining == 0)
 590                                 urb_done(hcd, td->urb, urb_priv->isoc_status);
 591                         goto too_late;
 592                 }
 593 
 594                 urb_priv->active = 1;
 595                 etd->td = td;
 596                 etd->ep = td->ep;
 597                 etd->urb = td->urb;
 598                 etd->len = td->len;
 599                 etd->dma_handle = td->dma_handle;
 600                 etd->cpu_buffer = td->cpu_buffer;
 601 
 602                 debug_isoc_submitted(imx21, cur_frame, td);
 603 
 604                 dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
 605                 setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
 606                 etd_writel(imx21, etd_num, 1, etd->dmem_offset);
 607                 etd_writel(imx21, etd_num, 2,
 608                         (TD_NOTACCESSED << DW2_COMPCODE) |
 609                         ((td->frame & 0xFFFF) << DW2_STARTFRM));
 610                 etd_writel(imx21, etd_num, 3,
 611                         (TD_NOTACCESSED << DW3_COMPCODE0) |
 612                         (td->len << DW3_PKTLEN0));
 613 
 614                 activate_etd(imx21, etd_num, dir);
 615         }
 616 }
 617 
 618 static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
 619 {
 620         struct imx21 *imx21 = hcd_to_imx21(hcd);
 621         int etd_mask = 1 << etd_num;
 622         struct etd_priv *etd = imx21->etd + etd_num;
 623         struct urb *urb = etd->urb;
 624         struct urb_priv *urb_priv = urb->hcpriv;
 625         struct td *td = etd->td;
 626         struct usb_host_endpoint *ep = etd->ep;
 627         int isoc_index = td->isoc_index;
 628         unsigned int pipe = urb->pipe;
 629         int dir_in = usb_pipein(pipe);
 630         int cc;
 631         int bytes_xfrd;
 632 
 633         disactivate_etd(imx21, etd_num);
 634 
 635         cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
 636         bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
 637 
 638         /* Input doesn't always fill the buffer, so don't generate an
 639          * error when this happens.
 640          */
 641         if (dir_in && (cc == TD_DATAUNDERRUN))
 642                 cc = TD_CC_NOERROR;
 643 
 644         if (cc == TD_NOTACCESSED)
 645                 bytes_xfrd = 0;
 646 
 647         debug_isoc_completed(imx21,
 648                 imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
 649         if (cc) {
 650                 urb_priv->isoc_status = -EXDEV;
 651                 dev_dbg(imx21->dev,
 652                         "bad iso cc=0x%X frame=%d sched frame=%d "
 653                         "cnt=%d len=%d urb=%p etd=%d index=%d\n",
 654                         cc,  imx21_hc_get_frame(hcd), td->frame,
 655                         bytes_xfrd, td->len, urb, etd_num, isoc_index);
 656         }
 657 
 658         if (dir_in) {
 659                 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 660                 if (!etd->dma_handle)
 661                         memcpy_fromio(etd->cpu_buffer,
 662                                 imx21->regs + USBOTG_DMEM + etd->dmem_offset,
 663                                 bytes_xfrd);
 664         }
 665 
 666         urb->actual_length += bytes_xfrd;
 667         urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
 668         urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
 669 
 670         etd->td = NULL;
 671         etd->urb = NULL;
 672         etd->ep = NULL;
 673 
 674         if (--urb_priv->isoc_remaining == 0)
 675                 urb_done(hcd, urb, urb_priv->isoc_status);
 676 
 677         schedule_isoc_etds(hcd, ep);
 678 }
 679 
 680 static struct ep_priv *alloc_isoc_ep(
 681         struct imx21 *imx21, struct usb_host_endpoint *ep)
 682 {
 683         struct ep_priv *ep_priv;
 684         int i;
 685 
 686         ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
 687         if (!ep_priv)
 688                 return NULL;
 689 
 690         for (i = 0; i < NUM_ISO_ETDS; i++)
 691                 ep_priv->etd[i] = -1;
 692 
 693         INIT_LIST_HEAD(&ep_priv->td_list);
 694         ep_priv->ep = ep;
 695         ep->hcpriv = ep_priv;
 696         return ep_priv;
 697 }
 698 
 699 static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
 700 {
 701         int i, j;
 702         int etd_num;
 703 
 704         /* Allocate the ETDs if required */
 705         for (i = 0; i < NUM_ISO_ETDS; i++) {
 706                 if (ep_priv->etd[i] < 0) {
 707                         etd_num = alloc_etd(imx21);
 708                         if (etd_num < 0)
 709                                 goto alloc_etd_failed;
 710 
 711                         ep_priv->etd[i] = etd_num;
 712                         imx21->etd[etd_num].ep = ep_priv->ep;
 713                 }
 714         }
 715         return 0;
 716 
 717 alloc_etd_failed:
 718         dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
 719         for (j = 0; j < i; j++) {
 720                 free_etd(imx21, ep_priv->etd[j]);
 721                 ep_priv->etd[j] = -1;
 722         }
 723         return -ENOMEM;
 724 }
 725 
 726 static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
 727                                      struct usb_host_endpoint *ep,
 728                                      struct urb *urb, gfp_t mem_flags)
 729 {
 730         struct imx21 *imx21 = hcd_to_imx21(hcd);
 731         struct urb_priv *urb_priv;
 732         unsigned long flags;
 733         struct ep_priv *ep_priv;
 734         struct td *td = NULL;
 735         int i;
 736         int ret;
 737         int cur_frame;
 738         u16 maxpacket;
 739 
 740         urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
 741         if (urb_priv == NULL)
 742                 return -ENOMEM;
 743 
 744         urb_priv->isoc_td = kcalloc(urb->number_of_packets, sizeof(struct td),
 745                                     mem_flags);
 746         if (urb_priv->isoc_td == NULL) {
 747                 ret = -ENOMEM;
 748                 goto alloc_td_failed;
 749         }
 750 
 751         spin_lock_irqsave(&imx21->lock, flags);
 752 
 753         if (ep->hcpriv == NULL) {
 754                 ep_priv = alloc_isoc_ep(imx21, ep);
 755                 if (ep_priv == NULL) {
 756                         ret = -ENOMEM;
 757                         goto alloc_ep_failed;
 758                 }
 759         } else {
 760                 ep_priv = ep->hcpriv;
 761         }
 762 
 763         ret = alloc_isoc_etds(imx21, ep_priv);
 764         if (ret)
 765                 goto alloc_etd_failed;
 766 
 767         ret = usb_hcd_link_urb_to_ep(hcd, urb);
 768         if (ret)
 769                 goto link_failed;
 770 
 771         urb->status = -EINPROGRESS;
 772         urb->actual_length = 0;
 773         urb->error_count = 0;
 774         urb->hcpriv = urb_priv;
 775         urb_priv->ep = ep;
 776 
 777         /* allocate data memory for largest packets if not already done */
 778         maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
 779         for (i = 0; i < NUM_ISO_ETDS; i++) {
 780                 struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
 781 
 782                 if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
 783                         /* not sure if this can really occur.... */
 784                         dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
 785                                 etd->dmem_size, maxpacket);
 786                         ret = -EMSGSIZE;
 787                         goto alloc_dmem_failed;
 788                 }
 789 
 790                 if (etd->dmem_size == 0) {
 791                         etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
 792                         if (etd->dmem_offset < 0) {
 793                                 dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
 794                                 ret = -EAGAIN;
 795                                 goto alloc_dmem_failed;
 796                         }
 797                         etd->dmem_size = maxpacket;
 798                 }
 799         }
 800 
 801         /* calculate frame */
 802         cur_frame = imx21_hc_get_frame(hcd);
 803         i = 0;
 804         if (list_empty(&ep_priv->td_list)) {
 805                 urb->start_frame = wrap_frame(cur_frame + 5);
 806         } else {
 807                 urb->start_frame = wrap_frame(list_entry(ep_priv->td_list.prev,
 808                                 struct td, list)->frame + urb->interval);
 809 
 810                 if (frame_after(cur_frame, urb->start_frame)) {
 811                         dev_dbg(imx21->dev,
 812                                 "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
 813                                 urb->start_frame, cur_frame,
 814                                 (urb->transfer_flags & URB_ISO_ASAP) != 0);
 815                         i = DIV_ROUND_UP(wrap_frame(
 816                                         cur_frame - urb->start_frame),
 817                                         urb->interval);
 818 
 819                         /* Treat underruns as if URB_ISO_ASAP was set */
 820                         if ((urb->transfer_flags & URB_ISO_ASAP) ||
 821                                         i >= urb->number_of_packets) {
 822                                 urb->start_frame = wrap_frame(urb->start_frame
 823                                                 + i * urb->interval);
 824                                 i = 0;
 825                         }
 826                 }
 827         }
 828 
 829         /* set up transfers */
 830         urb_priv->isoc_remaining = urb->number_of_packets - i;
 831         td = urb_priv->isoc_td;
 832         for (; i < urb->number_of_packets; i++, td++) {
 833                 unsigned int offset = urb->iso_frame_desc[i].offset;
 834                 td->ep = ep;
 835                 td->urb = urb;
 836                 td->len = urb->iso_frame_desc[i].length;
 837                 td->isoc_index = i;
 838                 td->frame = wrap_frame(urb->start_frame + urb->interval * i);
 839                 td->dma_handle = urb->transfer_dma + offset;
 840                 td->cpu_buffer = urb->transfer_buffer + offset;
 841                 list_add_tail(&td->list, &ep_priv->td_list);
 842         }
 843 
 844         dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
 845                 urb->number_of_packets, urb->start_frame, td->frame);
 846 
 847         debug_urb_submitted(imx21, urb);
 848         schedule_isoc_etds(hcd, ep);
 849 
 850         spin_unlock_irqrestore(&imx21->lock, flags);
 851         return 0;
 852 
 853 alloc_dmem_failed:
 854         usb_hcd_unlink_urb_from_ep(hcd, urb);
 855 
 856 link_failed:
 857 alloc_etd_failed:
 858 alloc_ep_failed:
 859         spin_unlock_irqrestore(&imx21->lock, flags);
 860         kfree(urb_priv->isoc_td);
 861 
 862 alloc_td_failed:
 863         kfree(urb_priv);
 864         return ret;
 865 }
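
The start-frame catch-up arithmetic above can be exercised in isolation:
given a scheduled start that the (wrapping) frame counter has already
passed, it computes how many packets have expired and, in the URB_ISO_ASAP
case, slides the whole schedule forward instead of dropping them. A
stand-alone model using example values:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int wrap_frame(int counter) { return counter & 0xFFFF; }

int main(void)
{
        int cur_frame = 0x0010;         /* counter wrapped past 0xFFFF */
        int start_frame = 0xFFF8;       /* scheduled before the wrap */
        int interval = 4;

        /* packets that would land before cur_frame */
        int skip = DIV_ROUND_UP(wrap_frame(cur_frame - start_frame),
                                interval);
        printf("skip=%d\n", skip);                              /* 6 */

        /* URB_ISO_ASAP: keep all packets, move the start instead */
        printf("new start=0x%04x\n",
               wrap_frame(start_frame + skip * interval));      /* 0x0010 */
        return 0;
}
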
 866 
 867 static void dequeue_isoc_urb(struct imx21 *imx21,
 868         struct urb *urb, struct ep_priv *ep_priv)
 869 {
 870         struct urb_priv *urb_priv = urb->hcpriv;
 871         struct td *td, *tmp;
 872         int i;
 873 
 874         if (urb_priv->active) {
 875                 for (i = 0; i < NUM_ISO_ETDS; i++) {
 876                         int etd_num = ep_priv->etd[i];
 877                         if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
 878                                 struct etd_priv *etd = imx21->etd + etd_num;
 879 
 880                                 reset_etd(imx21, etd_num);
 881                                 free_dmem(imx21, etd);
 882                         }
 883                 }
 884         }
 885 
 886         list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
 887                 if (td->urb == urb) {
 888                         dev_vdbg(imx21->dev, "removing td %p\n", td);
 889                         list_del(&td->list);
 890                 }
 891         }
 892 }
 893 
 894 /* =========================================== */
 895 /* NON ISOC Handling ...                        */
 896 /* =========================================== */
 897 
 898 static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 899 {
 900         unsigned int pipe = urb->pipe;
 901         struct urb_priv *urb_priv = urb->hcpriv;
 902         struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
 903         int state = urb_priv->state;
 904         int etd_num = ep_priv->etd[0];
 905         struct etd_priv *etd;
 906         u32 count;
 907         u16 etd_buf_size;
 908         u16 maxpacket;
 909         u8 dir;
 910         u8 bufround;
 911         u8 datatoggle;
 912         u8 interval = 0;
 913         u8 relpolpos = 0;
 914 
 915         if (etd_num < 0) {
 916                 dev_err(imx21->dev, "No valid ETD\n");
 917                 return;
 918         }
 919         if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
 920                 dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
 921 
 922         etd = &imx21->etd[etd_num];
 923         maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
 924         if (!maxpacket)
 925                 maxpacket = 8;
 926 
 927         if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
 928                 if (state == US_CTRL_SETUP) {
 929                         dir = TD_DIR_SETUP;
 930                         if (unsuitable_for_dma(urb->setup_dma))
 931                                 usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
 932                                         urb);
 933                         etd->dma_handle = urb->setup_dma;
 934                         etd->cpu_buffer = urb->setup_packet;
 935                         bufround = 0;
 936                         count = 8;
 937                         datatoggle = TD_TOGGLE_DATA0;
 938                 } else {        /* US_CTRL_ACK */
 939                         dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
 940                         bufround = 0;
 941                         count = 0;
 942                         datatoggle = TD_TOGGLE_DATA1;
 943                 }
 944         } else {
 945                 dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
 946                 bufround = (dir == TD_DIR_IN) ? 1 : 0;
 947                 if (unsuitable_for_dma(urb->transfer_dma))
 948                         usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
 949 
 950                 etd->dma_handle = urb->transfer_dma;
 951                 etd->cpu_buffer = urb->transfer_buffer;
 952                 if (usb_pipebulk(pipe) && (state == US_BULK0))
 953                         count = 0;
 954                 else
 955                         count = urb->transfer_buffer_length;
 956 
 957                 if (usb_pipecontrol(pipe)) {
 958                         datatoggle = TD_TOGGLE_DATA1;
 959                 } else {
 960                         if (usb_gettoggle(
 961                                         urb->dev,
 962                                         usb_pipeendpoint(urb->pipe),
 963                                         usb_pipeout(urb->pipe)))
 964                                 datatoggle = TD_TOGGLE_DATA1;
 965                         else
 966                                 datatoggle = TD_TOGGLE_DATA0;
 967                 }
 968         }
 969 
 970         etd->urb = urb;
 971         etd->ep = urb_priv->ep;
 972         etd->len = count;
 973 
 974         if (usb_pipeint(pipe)) {
 975                 interval = urb->interval;
 976                 relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
 977         }
 978 
 979         /* Write ETD to device memory */
 980         setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
 981 
 982         etd_writel(imx21, etd_num, 2,
 983                 (u32) interval << DW2_POLINTERV |
 984                 ((u32) relpolpos << DW2_RELPOLPOS) |
 985                 ((u32) dir << DW2_DIRPID) |
 986                 ((u32) bufround << DW2_BUFROUND) |
 987                 ((u32) datatoggle << DW2_DATATOG) |
 988                 ((u32) TD_NOTACCESSED << DW2_COMPCODE));
 989 
 990         /* DMA will always transfer the full buffer size even if TOBYCNT
 991          * in DWORD3 is smaller. Make sure we don't overrun the buffer!
 992          */
 993         if (count && count < maxpacket)
 994                 etd_buf_size = count;
 995         else
 996                 etd_buf_size = maxpacket;
 997 
 998         etd_writel(imx21, etd_num, 3,
 999                 ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
1000 
1001         if (!count)
1002                 etd->dma_handle = 0;
1003 
1004         /* allocate x and y buffer space at once */
1005         etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
1006         etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
1007         if (etd->dmem_offset < 0) {
1008                 /* Set up everything we can in HW and update when we get DMEM */
1009                 etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
1010 
1011                 dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
1012                 debug_urb_queued_for_dmem(imx21, urb);
1013                 list_add_tail(&etd->queue, &imx21->queue_for_dmem);
1014                 return;
1015         }
1016 
1017         etd_writel(imx21, etd_num, 1,
1018                 (((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
1019                 (u32) etd->dmem_offset);
1020 
1021         urb_priv->active = 1;
1022 
1023         /* enable the ETD to kick off transfer */
1024         dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
1025                 etd_num, count, dir != TD_DIR_IN ? "out" : "in");
1026         activate_etd(imx21, etd_num, dir);
1027 
1028 }
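
DWORD 1 of the ETD carries both buffer start addresses: the X buffer in the
low half and the Y buffer, placed maxpacket bytes later, in the high half
(the "maxpacket << 16" write in the queued-for-DMEM path above implies
DW1_YBUFSRTAD is 16). A minimal sketch of packing and unpacking that word:

#include <stdio.h>
#include <stdint.h>

#define DW1_YBUFSRTAD 16  /* inferred from the maxpacket << 16 write above */

int main(void)
{
        uint32_t dmem_offset = 0x040, maxpacket = 64;

        /* X buffer at dmem_offset, Y buffer immediately after it */
        uint32_t dw1 = ((dmem_offset + maxpacket) << DW1_YBUFSRTAD)
                        | dmem_offset;

        printf("x=0x%04x y=0x%04x\n",
               dw1 & 0xFFFF, dw1 >> DW1_YBUFSRTAD); /* x=0x0040 y=0x0080 */
        return 0;
}
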
1029 
1030 static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
1031 {
1032         struct imx21 *imx21 = hcd_to_imx21(hcd);
1033         struct etd_priv *etd = &imx21->etd[etd_num];
1034         struct urb *urb = etd->urb;
1035         u32 etd_mask = 1 << etd_num;
1036         struct urb_priv *urb_priv = urb->hcpriv;
1037         int dir;
1038         int cc;
1039         u32 bytes_xfrd;
1040         int etd_done;
1041 
1042         disactivate_etd(imx21, etd_num);
1043 
1044         dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
1045         cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
1046         bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
1047 
1048         /* save toggle carry */
1049         usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1050                       usb_pipeout(urb->pipe),
1051                       (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
1052 
1053         if (dir == TD_DIR_IN) {
1054                 clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
1055                 clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
1056 
1057                 if (etd->bounce_buffer) {
1058                         memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
1059                         dma_unmap_single(imx21->dev,
1060                                 etd->dma_handle, etd->len, DMA_FROM_DEVICE);
1061                 } else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
1062                         memcpy_fromio(etd->cpu_buffer,
1063                                 imx21->regs + USBOTG_DMEM + etd->dmem_offset,
1064                                 bytes_xfrd);
1065                 }
1066         }
1067 
1068         kfree(etd->bounce_buffer);
1069         etd->bounce_buffer = NULL;
1070         free_dmem(imx21, etd);
1071 
1072         urb->error_count = 0;
1073         if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
1074                         && (cc == TD_DATAUNDERRUN))
1075                 cc = TD_CC_NOERROR;
1076 
1077         if (cc != 0)
1078                 dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
1079 
1080         etd_done = (cc_to_error[cc] != 0);      /* stop if error */
1081 
1082         switch (usb_pipetype(urb->pipe)) {
1083         case PIPE_CONTROL:
1084                 switch (urb_priv->state) {
1085                 case US_CTRL_SETUP:
1086                         if (urb->transfer_buffer_length > 0)
1087                                 urb_priv->state = US_CTRL_DATA;
1088                         else
1089                                 urb_priv->state = US_CTRL_ACK;
1090                         break;
1091                 case US_CTRL_DATA:
1092                         urb->actual_length += bytes_xfrd;
1093                         urb_priv->state = US_CTRL_ACK;
1094                         break;
1095                 case US_CTRL_ACK:
1096                         etd_done = 1;
1097                         break;
1098                 default:
1099                         dev_err(imx21->dev,
1100                                 "Invalid pipe state %d\n", urb_priv->state);
1101                         etd_done = 1;
1102                         break;
1103                 }
1104                 break;
1105 
1106         case PIPE_BULK:
1107                 urb->actual_length += bytes_xfrd;
1108                 if ((urb_priv->state == US_BULK)
1109                     && (urb->transfer_flags & URB_ZERO_PACKET)
1110                     && urb->transfer_buffer_length > 0
1111                     && ((urb->transfer_buffer_length %
1112                          usb_maxpacket(urb->dev, urb->pipe,
1113                                        usb_pipeout(urb->pipe))) == 0)) {
1114                         /* need a 0-packet */
1115                         urb_priv->state = US_BULK0;
1116                 } else {
1117                         etd_done = 1;
1118                 }
1119                 break;
1120 
1121         case PIPE_INTERRUPT:
1122                 urb->actual_length += bytes_xfrd;
1123                 etd_done = 1;
1124                 break;
1125         }
1126 
1127         if (etd_done)
1128                 nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
1129         else {
1130                 dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
1131                 schedule_nonisoc_etd(imx21, urb);
1132         }
1133 }
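
For control pipes the switch above advances a SETUP -> DATA (optional) ->
ACK sequence, rescheduling the same ETD for each stage. The progression on
its own, using stand-in enum values (a hypothetical, self-contained
sketch, not the driver's state handling):

#include <stdio.h>

enum ctrl_state { CTRL_SETUP, CTRL_DATA, CTRL_ACK, CTRL_DONE };

/* One completion step of the control-transfer progression. */
static enum ctrl_state ctrl_next(enum ctrl_state s, int data_len)
{
        switch (s) {
        case CTRL_SETUP:
                return data_len > 0 ? CTRL_DATA : CTRL_ACK;
        case CTRL_DATA:
                return CTRL_ACK;
        default:
                return CTRL_DONE;
        }
}

int main(void)
{
        enum ctrl_state s = CTRL_SETUP;

        while (s != CTRL_DONE) {
                printf("state=%d\n", s);
                s = ctrl_next(s, 8);    /* control read with 8 data bytes */
        }
        return 0;
}
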
1134 
1135 
1136 static struct ep_priv *alloc_ep(void)
1137 {
1138         int i;
1139         struct ep_priv *ep_priv;
1140 
1141         ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
1142         if (!ep_priv)
1143                 return NULL;
1144 
1145         for (i = 0; i < NUM_ISO_ETDS; ++i)
1146                 ep_priv->etd[i] = -1;
1147 
1148         return ep_priv;
1149 }
1150 
1151 static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
1152                                 struct urb *urb, gfp_t mem_flags)
1153 {
1154         struct imx21 *imx21 = hcd_to_imx21(hcd);
1155         struct usb_host_endpoint *ep = urb->ep;
1156         struct urb_priv *urb_priv;
1157         struct ep_priv *ep_priv;
1158         struct etd_priv *etd;
1159         int ret;
1160         unsigned long flags;
1161 
1162         dev_vdbg(imx21->dev,
1163                 "enqueue urb=%p ep=%p len=%d "
1164                 "buffer=%p dma=%pad setupBuf=%p setupDma=%pad\n",
1165                 urb, ep,
1166                 urb->transfer_buffer_length,
1167                 urb->transfer_buffer, &urb->transfer_dma,
1168                 urb->setup_packet, &urb->setup_dma);
1169 
1170         if (usb_pipeisoc(urb->pipe))
1171                 return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
1172 
1173         urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
1174         if (!urb_priv)
1175                 return -ENOMEM;
1176 
1177         spin_lock_irqsave(&imx21->lock, flags);
1178 
1179         ep_priv = ep->hcpriv;
1180         if (ep_priv == NULL) {
1181                 ep_priv = alloc_ep();
1182                 if (!ep_priv) {
1183                         ret = -ENOMEM;
1184                         goto failed_alloc_ep;
1185                 }
1186                 ep->hcpriv = ep_priv;
1187                 ep_priv->ep = ep;
1188         }
1189 
1190         ret = usb_hcd_link_urb_to_ep(hcd, urb);
1191         if (ret)
1192                 goto failed_link;
1193 
1194         urb->status = -EINPROGRESS;
1195         urb->actual_length = 0;
1196         urb->error_count = 0;
1197         urb->hcpriv = urb_priv;
1198         urb_priv->ep = ep;
1199 
1200         switch (usb_pipetype(urb->pipe)) {
1201         case PIPE_CONTROL:
1202                 urb_priv->state = US_CTRL_SETUP;
1203                 break;
1204         case PIPE_BULK:
1205                 urb_priv->state = US_BULK;
1206                 break;
1207         }
1208 
1209         debug_urb_submitted(imx21, urb);
1210         if (ep_priv->etd[0] < 0) {
1211                 if (ep_priv->waiting_etd) {
1212                         dev_dbg(imx21->dev,
1213                                 "no ETD available already queued %p\n",
1214                                 ep_priv);
1215                         debug_urb_queued_for_etd(imx21, urb);
1216                         goto out;
1217                 }
1218                 ep_priv->etd[0] = alloc_etd(imx21);
1219                 if (ep_priv->etd[0] < 0) {
1220                         dev_dbg(imx21->dev,
1221                                 "no ETD available queueing %p\n", ep_priv);
1222                         debug_urb_queued_for_etd(imx21, urb);
1223                         list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
1224                         ep_priv->waiting_etd = 1;
1225                         goto out;
1226                 }
1227         }
1228 
1229         /* Schedule if no URB already active for this endpoint */
1230         etd = &imx21->etd[ep_priv->etd[0]];
1231         if (etd->urb == NULL) {
1232                 DEBUG_LOG_FRAME(imx21, etd, last_req);
1233                 schedule_nonisoc_etd(imx21, urb);
1234         }
1235 
1236 out:
1237         spin_unlock_irqrestore(&imx21->lock, flags);
1238         return 0;
1239 
1240 failed_link:
1241 failed_alloc_ep:
1242         spin_unlock_irqrestore(&imx21->lock, flags);
1243         kfree(urb_priv);
1244         return ret;
1245 }
1246 
1247 static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1248                                 int status)
1249 {
1250         struct imx21 *imx21 = hcd_to_imx21(hcd);
1251         unsigned long flags;
1252         struct usb_host_endpoint *ep;
1253         struct ep_priv *ep_priv;
1254         struct urb_priv *urb_priv = urb->hcpriv;
1255         int ret = -EINVAL;
1256 
1257         dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
1258                 urb, usb_pipeisoc(urb->pipe), status);
1259 
1260         spin_lock_irqsave(&imx21->lock, flags);
1261 
1262         ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1263         if (ret)
1264                 goto fail;
1265         ep = urb_priv->ep;
1266         ep_priv = ep->hcpriv;
1267 
1268         debug_urb_unlinked(imx21, urb);
1269 
1270         if (usb_pipeisoc(urb->pipe)) {
1271                 dequeue_isoc_urb(imx21, urb, ep_priv);
1272                 schedule_isoc_etds(hcd, ep);
1273         } else if (urb_priv->active) {
1274                 int etd_num = ep_priv->etd[0];
1275                 if (etd_num != -1) {
1276                         struct etd_priv *etd = &imx21->etd[etd_num];
1277 
1278                         disactivate_etd(imx21, etd_num);
1279                         free_dmem(imx21, etd);
1280                         etd->urb = NULL;
1281                         kfree(etd->bounce_buffer);
1282                         etd->bounce_buffer = NULL;
1283                 }
1284         }
1285 
1286         urb_done(hcd, urb, status);
1287 
1288         spin_unlock_irqrestore(&imx21->lock, flags);
1289         return 0;
1290 
1291 fail:
1292         spin_unlock_irqrestore(&imx21->lock, flags);
1293         return ret;
1294 }
1295 
1296 /* =========================================== */
1297 /* Interrupt dispatch                           */
1298 /* =========================================== */
1299 
1300 static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
1301 {
1302         int etd_num;
1303         int enable_sof_int = 0;
1304         unsigned long flags;
1305 
1306         spin_lock_irqsave(&imx21->lock, flags);
1307 
1308         for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
1309                 u32 etd_mask = 1 << etd_num;
1310                 u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
1311                 u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
1312                 struct etd_priv *etd = &imx21->etd[etd_num];
1313 
1314 
1315                 if (done) {
1316                         DEBUG_LOG_FRAME(imx21, etd, last_int);
1317                 } else {
1318 /*
1319  * Kludge warning!
1320  *
1321  * When multiple transfers are using the bus we sometimes get into a state
1322  * where the transfer has completed (the CC field of the ETD is != 0x0F),
1323  * the ETD has self disabled but the ETDDONESTAT flag is not set
1324  * (and hence no interrupt occurs).
1325  * This causes the transfer in question to hang.
1326  * The kludge below checks for this condition at each SOF and processes any
1327  * blocked ETDs (after an arbitrary 10 frame wait)
1328  *
1329  * With a single active transfer the usbtest test suite will run for days
1330  * without the kludge.
1331  * With other bus activity (eg mass storage) even just test1 will hang without
1332  * the kludge.
1333  */
1334                         u32 dword0;
1335                         int cc;
1336 
1337                         if (etd->active_count && !enabled) /* suspicious... */
1338                                 enable_sof_int = 1;
1339 
1340                         if (!sof || enabled || !etd->active_count)
1341                                 continue;
1342 
1343                         cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
1344                         if (cc == TD_NOTACCESSED)
1345                                 continue;
1346 
1347                         if (++etd->active_count < 10)
1348                                 continue;
1349 
1350                         dword0 = etd_readl(imx21, etd_num, 0);
1351                         dev_dbg(imx21->dev,
1352                                 "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
1353                                 etd_num, dword0 & 0x7F,
1354                                 (dword0 >> DW0_ENDPNT) & 0x0F,
1355                                 cc);
1356 
1357 #ifdef DEBUG
1358                         dev_dbg(imx21->dev,
1359                                 "frame: act=%d disact=%d"
1360                                 " int=%d req=%d cur=%d\n",
1361                                 etd->activated_frame,
1362                                 etd->disactivated_frame,
1363                                 etd->last_int_frame,
1364                                 etd->last_req_frame,
1365                                 readl(imx21->regs + USBH_FRMNUB));
1366                         imx21->debug_unblocks++;
1367 #endif
1368                         etd->active_count = 0;
1369 /* End of kludge */
1370                 }
1371 
1372                 if (etd->ep == NULL || etd->urb == NULL) {
1373                         dev_dbg(imx21->dev,
1374                                 "Interrupt for unexpected etd %d"
1375                                 " ep=%p urb=%p\n",
1376                                 etd_num, etd->ep, etd->urb);
1377                         disactivate_etd(imx21, etd_num);
1378                         continue;
1379                 }
1380 
1381                 if (usb_pipeisoc(etd->urb->pipe))
1382                         isoc_etd_done(hcd, etd_num);
1383                 else
1384                         nonisoc_etd_done(hcd, etd_num);
1385         }
1386 
1387         /* only enable SOF interrupt if it may be needed for the kludge */
1388         if (enable_sof_int)
1389                 set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1390         else
1391                 clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1392 
1393 
1394         spin_unlock_irqrestore(&imx21->lock, flags);
1395 }
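
The unblock kludge is effectively a per-ETD watchdog: an ETD that has a
completion code but no ETDDONESTAT flag is given roughly ten SOF polls
before being processed anyway. A stripped-down model of just the counter
logic (hypothetical names, not the driver's code path):

#include <stdio.h>

#define UNBLOCK_THRESHOLD 10

/* Returns 1 when the ETD should be processed (done, or force-unblocked). */
static int poll_etd(int *active_count, int done_flag, int completed)
{
        if (done_flag) {                /* normal completion path */
                *active_count = 0;
                return 1;
        }
        if (!completed)                 /* still in flight: keep waiting */
                return 0;
        if (++(*active_count) < UNBLOCK_THRESHOLD)
                return 0;
        *active_count = 0;              /* waited long enough: unblock */
        return 1;
}

int main(void)
{
        int count = 1;                  /* set when the ETD was activated */

        for (int sof = 0; sof < 12; sof++)
                if (poll_etd(&count, 0, 1)) {
                        printf("unblocked at SOF %d\n", sof);
                        break;
                }
        return 0;
}
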
1396 
1397 static irqreturn_t imx21_irq(struct usb_hcd *hcd)
1398 {
1399         struct imx21 *imx21 = hcd_to_imx21(hcd);
1400         u32 ints = readl(imx21->regs + USBH_SYSISR);
1401 
1402         if (ints & USBH_SYSIEN_HERRINT)
1403                 dev_dbg(imx21->dev, "Scheduling error\n");
1404 
1405         if (ints & USBH_SYSIEN_SORINT)
1406                 dev_dbg(imx21->dev, "Scheduling overrun\n");
1407 
1408         if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
1409                 process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
1410 
1411         writel(ints, imx21->regs + USBH_SYSISR);
1412         return IRQ_HANDLED;
1413 }
1414 
1415 static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
1416                                       struct usb_host_endpoint *ep)
1417 {
1418         struct imx21 *imx21 = hcd_to_imx21(hcd);
1419         unsigned long flags;
1420         struct ep_priv *ep_priv;
1421         int i;
1422 
1423         if (ep == NULL)
1424                 return;
1425 
1426         spin_lock_irqsave(&imx21->lock, flags);
1427         ep_priv = ep->hcpriv;
1428         dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
1429 
1430         if (!list_empty(&ep->urb_list))
1431                 dev_dbg(imx21->dev, "ep's URB list is not empty\n");
1432 
1433         if (ep_priv != NULL) {
1434                 for (i = 0; i < NUM_ISO_ETDS; i++) {
1435                         if (ep_priv->etd[i] > -1)
1436                                 dev_dbg(imx21->dev, "free etd %d for disable\n",
1437                                         ep_priv->etd[i]);
1438 
1439                         free_etd(imx21, ep_priv->etd[i]);
1440                 }
1441                 kfree(ep_priv);
1442                 ep->hcpriv = NULL;
1443         }
1444 
1445         for (i = 0; i < USB_NUM_ETD; i++) {
1446                 if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
1447                         dev_err(imx21->dev,
1448                                 "Active etd %d for disabled ep=%p!\n", i, ep);
1449                         free_etd(imx21, i);
1450                 }
1451         }
1452         free_epdmem(imx21, ep);
1453         spin_unlock_irqrestore(&imx21->lock, flags);
1454 }
1455 
1456 /* =========================================== */
1457 /* Hub handling                                 */
1458 /* =========================================== */
1459 
1460 static int get_hub_descriptor(struct usb_hcd *hcd,
1461                               struct usb_hub_descriptor *desc)
1462 {
1463         struct imx21 *imx21 = hcd_to_imx21(hcd);
1464         desc->bDescriptorType = USB_DT_HUB; /* HUB descriptor */
1465         desc->bHubContrCurrent = 0;
1466 
1467         desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
1468                 & USBH_ROOTHUBA_NDNSTMPRT_MASK;
1469         desc->bDescLength = 9;
1470         desc->bPwrOn2PwrGood = 0;
1471         desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
1472                 HUB_CHAR_NO_LPSM |      /* No power switching */
1473                 HUB_CHAR_NO_OCPM);      /* No over current protection */
1474 
1475         desc->u.hs.DeviceRemovable[0] = 1 << 1;
1476         desc->u.hs.DeviceRemovable[1] = ~0;
1477         return 0;
1478 }
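     /*
      * DeviceRemovable[0] = 1 << 1 marks the device on port 1 as
      * non-removable (bit 0 of the bitmap is reserved).  With at most
      * seven ports the descriptor is 9 bytes, so the all-ones byte that
      * follows goes out on the wire as the legacy USB 1.0
      * PortPwrCtrlMask, which the spec requires to be all ones.
      */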
1479 
1480 static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
1481 {
1482         struct imx21 *imx21 = hcd_to_imx21(hcd);
1483         int ports;
1484         int changed = 0;
1485         int i;
1486         unsigned long flags;
1487 
1488         spin_lock_irqsave(&imx21->lock, flags);
1489         ports = readl(imx21->regs + USBH_ROOTHUBA)
1490                 & USBH_ROOTHUBA_NDNSTMPRT_MASK;
1491         if (ports > 7) {
1492                 dev_err(imx21->dev, "ports %d > 7\n", ports);
1493                 ports = 7;
1494         }
1495         for (i = 0; i < ports; i++) {
1496                 if (readl(imx21->regs + USBH_PORTSTAT(i)) &
1497                         (USBH_PORTSTAT_CONNECTSC |
1498                         USBH_PORTSTAT_PRTENBLSC |
1499                         USBH_PORTSTAT_PRTSTATSC |
1500                         USBH_PORTSTAT_OVRCURIC |
1501                         USBH_PORTSTAT_PRTRSTSC)) {
1502 
1503                         changed = 1;
1504                         buf[0] |= 1 << (i + 1);
1505                 }
1506         }
1507         spin_unlock_irqrestore(&imx21->lock, flags);
1508 
1509         if (changed)
1510                 dev_info(imx21->dev, "Hub status changed\n");
1511         return changed;
1512 }
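     /*
      * The bitmap follows the usbcore hub convention: bit 0 is the hub
      * itself and bit N a change on port N (ports are numbered from 1),
      * hence the (i + 1) shift above.  A non-zero return prompts usbcore
      * to query each flagged port with GetPortStatus.
      */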
1513 
1514 static int imx21_hc_hub_control(struct usb_hcd *hcd,
1515                                 u16 typeReq,
1516                                 u16 wValue, u16 wIndex, char *buf, u16 wLength)
1517 {
1518         struct imx21 *imx21 = hcd_to_imx21(hcd);
1519         int rc = 0;
1520         u32 status_write = 0;
1521 
1522         switch (typeReq) {
1523         case ClearHubFeature:
1524                 dev_dbg(imx21->dev, "ClearHubFeature\n");
1525                 switch (wValue) {
1526                 case C_HUB_OVER_CURRENT:
1527                         dev_dbg(imx21->dev, "    OVER_CURRENT\n");
1528                         break;
1529                 case C_HUB_LOCAL_POWER:
1530                         dev_dbg(imx21->dev, "    LOCAL_POWER\n");
1531                         break;
1532                 default:
1533                         dev_dbg(imx21->dev, "    unknown\n");
1534                         rc = -EINVAL;
1535                         break;
1536                 }
1537                 break;
1538 
1539         case ClearPortFeature:
1540                 dev_dbg(imx21->dev, "ClearPortFeature\n");
1541                 switch (wValue) {
1542                 case USB_PORT_FEAT_ENABLE:
1543                         dev_dbg(imx21->dev, "    ENABLE\n");
1544                         status_write = USBH_PORTSTAT_CURCONST;
1545                         break;
1546                 case USB_PORT_FEAT_SUSPEND:
1547                         dev_dbg(imx21->dev, "    SUSPEND\n");
1548                         status_write = USBH_PORTSTAT_PRTOVRCURI;
1549                         break;
1550                 case USB_PORT_FEAT_POWER:
1551                         dev_dbg(imx21->dev, "    POWER\n");
1552                         status_write = USBH_PORTSTAT_LSDEVCON;
1553                         break;
1554                 case USB_PORT_FEAT_C_ENABLE:
1555                         dev_dbg(imx21->dev, "    C_ENABLE\n");
1556                         status_write = USBH_PORTSTAT_PRTENBLSC;
1557                         break;
1558                 case USB_PORT_FEAT_C_SUSPEND:
1559                         dev_dbg(imx21->dev, "    C_SUSPEND\n");
1560                         status_write = USBH_PORTSTAT_PRTSTATSC;
1561                         break;
1562                 case USB_PORT_FEAT_C_CONNECTION:
1563                         dev_dbg(imx21->dev, "    C_CONNECTION\n");
1564                         status_write = USBH_PORTSTAT_CONNECTSC;
1565                         break;
1566                 case USB_PORT_FEAT_C_OVER_CURRENT:
1567                         dev_dbg(imx21->dev, "    C_OVER_CURRENT\n");
1568                         status_write = USBH_PORTSTAT_OVRCURIC;
1569                         break;
1570                 case USB_PORT_FEAT_C_RESET:
1571                         dev_dbg(imx21->dev, "    C_RESET\n");
1572                         status_write = USBH_PORTSTAT_PRTRSTSC;
1573                         break;
1574                 default:
1575                         dev_dbg(imx21->dev, "    unknown\n");
1576                         rc = -EINVAL;
1577                         break;
1578                 }
1579 
1580                 break;
1581 
1582         case GetHubDescriptor:
1583                 dev_dbg(imx21->dev, "GetHubDescriptor\n");
1584                 rc = get_hub_descriptor(hcd, (void *)buf);
1585                 break;
1586 
1587         case GetHubStatus:
1588                 dev_dbg(imx21->dev, "GetHubStatus\n");
1589                 *(__le32 *) buf = 0;
1590                 break;
1591 
1592         case GetPortStatus:
1593                 dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
1594                     wIndex, USBH_PORTSTAT(wIndex - 1));
1595                 *(__le32 *) buf = readl(imx21->regs +
1596                         USBH_PORTSTAT(wIndex - 1));
1597                 break;
1598 
1599         case SetHubFeature:
1600                 dev_dbg(imx21->dev, "SetHubFeature\n");
1601                 switch (wValue) {
1602                 case C_HUB_OVER_CURRENT:
1603                         dev_dbg(imx21->dev, "    OVER_CURRENT\n");
1604                         break;
1605 
1606                 case C_HUB_LOCAL_POWER:
1607                         dev_dbg(imx21->dev, "    LOCAL_POWER\n");
1608                         break;
1609                 default:
1610                         dev_dbg(imx21->dev, "    unknown\n");
1611                         rc = -EINVAL;
1612                         break;
1613                 }
1614 
1615                 break;
1616 
1617         case SetPortFeature:
1618                 dev_dbg(imx21->dev, "SetPortFeature\n");
1619                 switch (wValue) {
1620                 case USB_PORT_FEAT_SUSPEND:
1621                         dev_dbg(imx21->dev, "    SUSPEND\n");
1622                         status_write = USBH_PORTSTAT_PRTSUSPST;
1623                         break;
1624                 case USB_PORT_FEAT_POWER:
1625                         dev_dbg(imx21->dev, "    POWER\n");
1626                         status_write = USBH_PORTSTAT_PRTPWRST;
1627                         break;
1628                 case USB_PORT_FEAT_RESET:
1629                         dev_dbg(imx21->dev, "    RESET\n");
1630                         status_write = USBH_PORTSTAT_PRTRSTST;
1631                         break;
1632                 default:
1633                         dev_dbg(imx21->dev, "    unknown\n");
1634                         rc = -EINVAL;
1635                         break;
1636                 }
1637                 break;
1638 
1639         default:
1640                 dev_dbg(imx21->dev, "unknown\n");
1641                 rc = -EINVAL;
1642                 break;
1643         }
1644 
1645         if (status_write)
1646                 writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
1647         return rc;
1648 }
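     /*
      * PORTSTAT appears to follow the OHCI root-hub convention: the bit
      * position written selects the operation rather than setting a
      * literal value (writing the "current connect status" position, for
      * instance, clears the port enable), which is why both Set- and
      * ClearPortFeature funnel into the single writel() of status_write.
      */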
1649 
1650 /* =========================================== */
1651 /* Host controller management                   */
1652 /* =========================================== */
1653 
1654 static int imx21_hc_reset(struct usb_hcd *hcd)
1655 {
1656         struct imx21 *imx21 = hcd_to_imx21(hcd);
1657         unsigned long timeout;
1658         unsigned long flags;
1659 
1660         spin_lock_irqsave(&imx21->lock, flags);
1661 
1662         /* Reset the Host controller modules */
1663         writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
1664                 USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
1665                 imx21->regs + USBOTG_RST_CTRL);
1666 
1667         /* Wait for reset to finish */
1668         timeout = jiffies + HZ;
1669         while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
1670                 if (time_after(jiffies, timeout)) {
1671                         spin_unlock_irqrestore(&imx21->lock, flags);
1672                         dev_err(imx21->dev, "timeout waiting for reset\n");
1673                         return -ETIMEDOUT;
1674                 }
1675                 spin_unlock_irq(&imx21->lock);
1676                 schedule_timeout_uninterruptible(1);
1677                 spin_lock_irq(&imx21->lock);
1678         }
1679         spin_unlock_irqrestore(&imx21->lock, flags);
1680         return 0;
1681 }
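     /*
      * The reset bits self-clear once the hardware finishes, so the loop
      * polls RST_CTRL for zero, dropping the lock around each one-jiffy
      * sleep to avoid sleeping with the spinlock held.
      */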
1682 
1683 static int imx21_hc_start(struct usb_hcd *hcd)
1684 {
1685         struct imx21 *imx21 = hcd_to_imx21(hcd);
1686         unsigned long flags;
1687         int i, j;
1688         u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
1689         u32 usb_control = 0;
1690 
1691         hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
1692                         USBOTG_HWMODE_HOSTXCVR_MASK);
1693         hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
1694                         USBOTG_HWMODE_OTGXCVR_MASK);
1695 
1696         if (imx21->pdata->host1_txenoe)
1697                 usb_control |= USBCTRL_HOST1_TXEN_OE;
1698 
1699         if (!imx21->pdata->host1_xcverless)
1700                 usb_control |= USBCTRL_HOST1_BYP_TLL;
1701 
1702         if (imx21->pdata->otg_ext_xcvr)
1703                 usb_control |= USBCTRL_OTC_RCV_RXDP;
1704 
1705 
1706         spin_lock_irqsave(&imx21->lock, flags);
1707 
1708         writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
1709                 imx21->regs + USBOTG_CLK_CTRL);
1710         writel(hw_mode, imx21->regs + USBOTG_HWMODE);
1711         writel(usb_control, imx21->regs + USBCTRL);
1712         writel(USB_MISCCONTROL_SKPRTRY  | USB_MISCCONTROL_ARBMODE,
1713                 imx21->regs + USB_MISCCONTROL);
1714 
1715         /* Clear the ETDs */
1716         for (i = 0; i < USB_NUM_ETD; i++)
1717                 for (j = 0; j < 4; j++)
1718                         etd_writel(imx21, i, j, 0);
1719 
1720         /* Take the HC out of reset */
1721         writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
1722                 imx21->regs + USBH_HOST_CTRL);
1723 
1724         /* Enable ports */
1725         if (imx21->pdata->enable_otg_host)
1726                 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1727                         imx21->regs + USBH_PORTSTAT(0));
1728 
1729         if (imx21->pdata->enable_host1)
1730                 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1731                         imx21->regs + USBH_PORTSTAT(1));
1732 
1733         if (imx21->pdata->enable_host2)
1734                 writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1735                         imx21->regs + USBH_PORTSTAT(2));
1736 
1737 
1738         hcd->state = HC_STATE_RUNNING;
1739 
1740         /* Enable host controller interrupts */
1741         set_register_bits(imx21, USBH_SYSIEN,
1742                 USBH_SYSIEN_HERRINT |
1743                 USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
1744         set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1745 
1746         spin_unlock_irqrestore(&imx21->lock, flags);
1747 
1748         return 0;
1749 }
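     /*
      * Start-up order above: clocks and transceiver routing first, the
      * ETD array scrubbed while the controller is quiescent, the HC
      * moved to OPERATIONAL, port power applied, and interrupts
      * unmasked only as the last step.
      */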
1750 
1751 static void imx21_hc_stop(struct usb_hcd *hcd)
1752 {
1753         struct imx21 *imx21 = hcd_to_imx21(hcd);
1754         unsigned long flags;
1755 
1756         spin_lock_irqsave(&imx21->lock, flags);
1757 
1758         writel(0, imx21->regs + USBH_SYSIEN);
1759         clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1760         clear_register_bits(imx21, USBOTG_CLK_CTRL,
1761                         USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
1762         spin_unlock_irqrestore(&imx21->lock, flags);
1763 }
1764 
1765 /* =========================================== */
1766 /* Driver glue                                  */
1767 /* =========================================== */
1768 
1769 static const struct hc_driver imx21_hc_driver = {
1770         .description = hcd_name,
1771         .product_desc = "IMX21 USB Host Controller",
1772         .hcd_priv_size = sizeof(struct imx21),
1773 
1774         .flags = HCD_DMA | HCD_USB11,
1775         .irq = imx21_irq,
1776 
1777         .reset = imx21_hc_reset,
1778         .start = imx21_hc_start,
1779         .stop = imx21_hc_stop,
1780 
1781         /* I/O requests */
1782         .urb_enqueue = imx21_hc_urb_enqueue,
1783         .urb_dequeue = imx21_hc_urb_dequeue,
1784         .endpoint_disable = imx21_hc_endpoint_disable,
1785 
1786         /* scheduling support */
1787         .get_frame_number = imx21_hc_get_frame,
1788 
1789         /* Root hub support */
1790         .hub_status_data = imx21_hc_hub_status_data,
1791         .hub_control = imx21_hc_hub_control,
1792 
1793 };
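     /*
      * HCD_DMA tells usbcore to DMA-map URB buffers before they reach
      * urb_enqueue() (buffers the hardware cannot handle by DMA are
      * screened by unsuitable_for_dma() elsewhere in this file);
      * HCD_USB11 caps the root hub at full/low speed.
      */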
1794 
1795 static struct mx21_usbh_platform_data default_pdata = {
1796         .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1797         .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1798         .enable_host1 = 1,
1799         .enable_host2 = 1,
1800         .enable_otg_host = 1,
1801 
1802 };
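     /*
      * For illustration only, not part of the driver: a board file
      * might override default_pdata along these lines.  The pdata
      * fields are the real ones used above; the device and its values
      * are hypothetical, and the MMIO/IRQ resources a real board entry
      * needs are omitted for brevity.
      */
     #if 0
     static struct mx21_usbh_platform_data example_usbh_pdata = {
             .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
             .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
             .enable_host2 = 1,      /* only host port 2 wired up */
     };

     static struct platform_device example_usbh_device = {
             .name = "imx21-hcd",    /* matches this driver's name */
             .id = -1,
             .dev = {
                     .platform_data = &example_usbh_pdata,
             },
             /* .resource / .num_resources for MMIO + IRQ omitted */
     };
     #endif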
1803 
1804 static int imx21_remove(struct platform_device *pdev)
1805 {
1806         struct usb_hcd *hcd = platform_get_drvdata(pdev);
1807         struct imx21 *imx21 = hcd_to_imx21(hcd);
1808         struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1809 
1810         remove_debug_files(imx21);
1811         usb_remove_hcd(hcd);
1812 
1813         if (res != NULL) {
1814                 clk_disable_unprepare(imx21->clk);
1815                 clk_put(imx21->clk);
1816                 iounmap(imx21->regs);
1817                 release_mem_region(res->start, resource_size(res));
1818         }
1819 
1820         usb_put_hcd(hcd);
1821         return 0;
1822 }
1823 
1824 
1825 static int imx21_probe(struct platform_device *pdev)
1826 {
1827         struct usb_hcd *hcd;
1828         struct imx21 *imx21;
1829         struct resource *res;
1830         int ret;
1831         int irq;
1832 
1833         printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
1834 
1835         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1836         if (!res)
1837                 return -ENODEV;
1838         irq = platform_get_irq(pdev, 0);
1839         if (irq < 0)
1840                 return irq;
1841 
1842         hcd = usb_create_hcd(&imx21_hc_driver,
1843                 &pdev->dev, dev_name(&pdev->dev));
1844         if (hcd == NULL) {
1845                 dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
1846                     dev_name(&pdev->dev));
1847                 return -ENOMEM;
1848         }
1849 
1850         imx21 = hcd_to_imx21(hcd);
1851         imx21->hcd = hcd;
1852         imx21->dev = &pdev->dev;
1853         imx21->pdata = dev_get_platdata(&pdev->dev);
1854         if (!imx21->pdata)
1855                 imx21->pdata = &default_pdata;
1856 
1857         spin_lock_init(&imx21->lock);
1858         INIT_LIST_HEAD(&imx21->dmem_list);
1859         INIT_LIST_HEAD(&imx21->queue_for_etd);
1860         INIT_LIST_HEAD(&imx21->queue_for_dmem);
1861         create_debug_files(imx21);
1862 
1863         res = request_mem_region(res->start, resource_size(res), hcd_name);
1864         if (!res) {
1865                 ret = -EBUSY;
1866                 goto failed_request_mem;
1867         }
1868 
1869         imx21->regs = ioremap(res->start, resource_size(res));
1870         if (imx21->regs == NULL) {
1871                 dev_err(imx21->dev, "Cannot map registers\n");
1872                 ret = -ENOMEM;
1873                 goto failed_ioremap;
1874         }
1875 
1876         /* Enable clock source */
1877         imx21->clk = clk_get(imx21->dev, NULL);
1878         if (IS_ERR(imx21->clk)) {
1879                 dev_err(imx21->dev, "no clock found\n");
1880                 ret = PTR_ERR(imx21->clk);
1881                 goto failed_clock_get;
1882         }
1883 
1884         ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
1885         if (ret)
1886                 goto failed_clock_set;
1887         ret = clk_prepare_enable(imx21->clk);
1888         if (ret)
1889                 goto failed_clock_enable;
1890 
1891         dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
1892                 (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
1893 
1894         ret = usb_add_hcd(hcd, irq, 0);
1895         if (ret != 0) {
1896                 dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
1897                 goto failed_add_hcd;
1898         }
1899         device_wakeup_enable(hcd->self.controller);
1900 
1901         return 0;
1902 
1903 failed_add_hcd:
1904         clk_disable_unprepare(imx21->clk);
1905 failed_clock_enable:
1906 failed_clock_set:
1907         clk_put(imx21->clk);
1908 failed_clock_get:
1909         iounmap(imx21->regs);
1910 failed_ioremap:
1911         release_mem_region(res->start, resource_size(res));
1912 failed_request_mem:
1913         remove_debug_files(imx21);
1914         usb_put_hcd(hcd);
1915         return ret;
1916 }
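     /*
      * The unwind labels mirror the acquisition order, so each failure
      * point releases exactly what was set up before it; the final
      * usb_put_hcd() drops the reference taken by usb_create_hcd().
      */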
1917 
1918 static struct platform_driver imx21_hcd_driver = {
1919         .driver = {
1920                    .name = hcd_name,
1921                    },
1922         .probe = imx21_probe,
1923         .remove = imx21_remove,
1924         .suspend = NULL,
1925         .resume = NULL,
1926 };
1927 
1928 module_platform_driver(imx21_hcd_driver);
1929 
1930 MODULE_DESCRIPTION("i.MX21 USB Host controller");
1931 MODULE_AUTHOR("Martin Fuzzey");
1932 MODULE_LICENSE("GPL");
1933 MODULE_ALIAS("platform:imx21-hcd");
