drivers/atm/iphase.c


DEFINITIONS

This source file includes the following definitions:
  1. ia_init_rtn_q
  2. ia_enque_head_rtn_q
  3. ia_enque_rtn_q
  4. ia_deque_rtn_q
  5. ia_hack_tcq
  6. get_desc
  7. clear_lockup
  8. cellrate_to_float
  9. float_to_cellrate
  10. init_abr_vc
  11. ia_open_abr_vc
  12. ia_cbr_setup
  13. ia_cbrVc_close
  14. ia_avail_descs
  15. ia_que_tx
  16. ia_tx_poll
  17. ia_eeprom_put
  18. ia_eeprom_get
  19. ia_hw_type
  20. ia_phy_read32
  21. ia_phy_write32
  22. ia_frontend_intr
  23. ia_mb25_init
  24. ia_phy_write
  25. ia_suni_pm7345_init_ds3
  26. ia_suni_pm7345_init_e3
  27. ia_suni_pm7345_init
  28. xdump
  29. desc_dbg
  30. rx_excp_rcvd
  31. free_desc
  32. rx_pkt
  33. rx_intr
  34. rx_dle_intr
  35. open_rx
  36. rx_init
  37. tx_intr
  38. tx_dle_intr
  39. open_tx
  40. tx_init
  41. ia_int
  42. get_esi
  43. reset_sar
  44. ia_init
  45. ia_update_stats
  46. ia_led_timer
  47. ia_phy_put
  48. ia_phy_get
  49. ia_free_tx
  50. ia_free_rx
  51. ia_start
  52. ia_close
  53. ia_open
  54. ia_change_qos
  55. ia_ioctl
  56. ia_getsockopt
  57. ia_setsockopt
  58. ia_pkt_tx
  59. ia_send
  60. ia_proc_read
  61. ia_init_one
  62. ia_remove_one
  63. ia_module_init
  64. ia_module_exit

   1 /******************************************************************************
   2          iphase.c: Device driver for Interphase ATM PCI adapter cards 
   3                     Author: Peter Wang  <pwang@iphase.com>            
   4                    Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
   5                    Interphase Corporation  <www.iphase.com>           
   6                                Version: 1.0                           
   7 *******************************************************************************
   8       
   9       This software may be used and distributed according to the terms
  10       of the GNU General Public License (GPL), incorporated herein by reference.
  11       Drivers based on this skeleton fall under the GPL and must retain
  12       the authorship (implicit copyright) notice.
  13 
  14       This program is distributed in the hope that it will be useful, but
  15       WITHOUT ANY WARRANTY; without even the implied warranty of
  16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17       General Public License for more details.
  18       
  19       Modified from an incomplete driver for the Interphase 5575 1KVC 1M card, which 
  20       was originally written by Monalisa Agrawal at UNH. Now this driver 
  21       supports a variety of variants of the Interphase ATM PCI (i)Chip adapter 
  22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) 
  23       in terms of PHY type, the size of control memory and the size of 
  24       packet memory. The following are the change log and history:
  25      
  26           Fix bugs in Mona's UBR driver.
  27           Modify the basic memory allocation and dma logic.
  28           Port the driver to the latest kernel from 2.0.46.
  29           Complete the ABR logic of the driver, and add the ABR work-
  30               around for the hardware anomalies.
  31           Add the CBR support.
  32           Add the flow control logic to the driver to allow rate-limited VCs.
  33           Add 4K VC support to the board with 512K control memory.
  34           Add the support of all the variants of the Interphase ATM PCI 
  35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
  36           (25M UTP25) and x531 (DS3 and E3).
  37           Add SMP support.
  38 
  39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
  40 
  41 *******************************************************************************/
  42 
  43 #include <linux/module.h>  
  44 #include <linux/kernel.h>  
  45 #include <linux/mm.h>  
  46 #include <linux/pci.h>  
  47 #include <linux/errno.h>  
  48 #include <linux/atm.h>  
  49 #include <linux/atmdev.h>  
  50 #include <linux/sonet.h>  
  51 #include <linux/skbuff.h>  
  52 #include <linux/time.h>  
  53 #include <linux/delay.h>  
  54 #include <linux/uio.h>  
  55 #include <linux/init.h>  
  56 #include <linux/interrupt.h>
  57 #include <linux/wait.h>
  58 #include <linux/slab.h>
  59 #include <asm/io.h>  
  60 #include <linux/atomic.h>
  61 #include <linux/uaccess.h>  
  62 #include <asm/string.h>  
  63 #include <asm/byteorder.h>  
  64 #include <linux/vmalloc.h>
  65 #include <linux/jiffies.h>
  66 #include <linux/nospec.h>
  67 #include "iphase.h"               
  68 #include "suni.h"                 
  69 #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
  70 
  71 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
  72 
  73 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
  74 static void desc_dbg(IADEV *iadev);
  75 
  76 static IADEV *ia_dev[8];
  77 static struct atm_dev *_ia_dev[8];
  78 static int iadev_count;
  79 static void ia_led_timer(struct timer_list *unused);
  80 static DEFINE_TIMER(ia_timer, ia_led_timer);
  81 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
  82 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
  83 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
  84             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; 
  85 
  86 module_param(IA_TX_BUF, int, 0);
  87 module_param(IA_TX_BUF_SZ, int, 0);
  88 module_param(IA_RX_BUF, int, 0);
  89 module_param(IA_RX_BUF_SZ, int, 0);
  90 module_param(IADebugFlag, uint, 0644);
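     /*
     ** Illustrative usage note (not from the original source): these values
     ** can be overridden when the module is loaded, assuming the usual module
     ** name "iphase", e.g.
     **
     **      modprobe iphase IA_TX_BUF=128 IA_TX_BUF_SZ=10240
     **
     ** IADebugFlag is a bitmask of the IF_IADBG_* values referenced above
     ** and, being registered with mode 0644, can also be changed at run time
     ** through /sys/module/iphase/parameters/IADebugFlag.
     */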
  91 
  92 MODULE_LICENSE("GPL");
  93 
  94 /**************************** IA_LIB **********************************/
  95 
  96 static void ia_init_rtn_q (IARTN_Q *que) 
  97 { 
  98    que->next = NULL; 
  99    que->tail = NULL; 
 100 }
 101 
 102 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) 
 103 {
 104    data->next = NULL;
 105    if (que->next == NULL) 
 106       que->next = que->tail = data;
 107    else {
 108       data->next = que->next;
 109       que->next = data;
 110    } 
 111    return;
 112 }
 113 
 114 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
 115    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 116    if (!entry)
 117       return -ENOMEM;
 118    entry->data = data;
 119    entry->next = NULL;
 120    if (que->next == NULL) 
 121       que->next = que->tail = entry;
 122    else {
 123       que->tail->next = entry;
 124       que->tail = que->tail->next;
 125    }      
 126    return 1;
 127 }
 128 
 129 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
 130    IARTN_Q *tmpdata;
 131    if (que->next == NULL)
 132       return NULL;
 133    tmpdata = que->next;
 134    if ( que->next == que->tail)  
 135       que->next = que->tail = NULL;
 136    else 
 137       que->next = que->next->next;
 138    return tmpdata;
 139 }
 140 
 141 static void ia_hack_tcq(IADEV *dev) {
 142 
 143   u_short               desc1;
 144   u_short               tcq_wr;
 145   struct ia_vcc         *iavcc_r = NULL; 
 146 
 147   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
 148   while (dev->host_tcq_wr != tcq_wr) {
 149      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
 150      if (!desc1) ;
 151      else if (!dev->desc_tbl[desc1 -1].timestamp) {
 152         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
 153         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
 154      }                                 
 155      else if (dev->desc_tbl[desc1 -1].timestamp) {
 156         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { 
 157            printk("IA: Fatal err in get_desc\n");
 158            continue;
 159         }
 160         iavcc_r->vc_desc_cnt--;
 161         dev->desc_tbl[desc1 -1].timestamp = 0;
 162         IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
 163                                    dev->desc_tbl[desc1 -1].txskb, desc1);)
 164         if (iavcc_r->pcr < dev->rate_limit) {
 165            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
 166            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
 167               printk("ia_hack_tcq: No memory available\n");
 168         } 
 169         dev->desc_tbl[desc1 -1].iavcc = NULL;
 170         dev->desc_tbl[desc1 -1].txskb = NULL;
 171      }
 172      dev->host_tcq_wr += 2;
 173      if (dev->host_tcq_wr > dev->ffL.tcq_ed) 
 174         dev->host_tcq_wr = dev->ffL.tcq_st;
 175   }
 176 } /* ia_hack_tcq */
 177 
 178 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
 179   u_short               desc_num, i;
 180   struct sk_buff        *skb;
 181   struct ia_vcc         *iavcc_r = NULL; 
 182   unsigned long delta;
 183   static unsigned long timer = 0;
 184   int ltimeout;
 185 
 186   ia_hack_tcq (dev);
 187   if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
 188      timer = jiffies; 
 189      i=0;
 190      while (i < dev->num_tx_desc) {
 191         if (!dev->desc_tbl[i].timestamp) {
 192            i++;
 193            continue;
 194         }
 195         ltimeout = dev->desc_tbl[i].iavcc->ltimeout; 
 196         delta = jiffies - dev->desc_tbl[i].timestamp;
 197         if (delta >= ltimeout) {
 198            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
 199            if (dev->ffL.tcq_rd == dev->ffL.tcq_st) 
 200               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
 201            else 
 202               dev->ffL.tcq_rd -= 2;
 203            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
 204            if (!(skb = dev->desc_tbl[i].txskb) || 
 205                           !(iavcc_r = dev->desc_tbl[i].iavcc))
 206               printk("Fatal err, desc table vcc or skb is NULL\n");
 207            else 
 208               iavcc_r->vc_desc_cnt--;
 209            dev->desc_tbl[i].timestamp = 0;
 210            dev->desc_tbl[i].iavcc = NULL;
 211            dev->desc_tbl[i].txskb = NULL;
 212         }
 213         i++;
 214      } /* while */
 215   }
 216   if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
 217      return 0xFFFF;
 218     
 219   /* Get the next available descriptor number from TCQ */
 220   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
 221 
 222   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
 223      dev->ffL.tcq_rd += 2;
 224      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
 225         dev->ffL.tcq_rd = dev->ffL.tcq_st;
 226      if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
 227         return 0xFFFF; 
 228      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
 229   }
 230 
 231   /* get system time */
 232   dev->desc_tbl[desc_num -1].timestamp = jiffies;
 233   return desc_num;
 234 }
 235 
 236 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
 237   u_char                foundLockUp;
 238   vcstatus_t            *vcstatus;
 239   u_short               *shd_tbl;
 240   u_short               tempCellSlot, tempFract;
 241   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
 242   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
 243   u_int  i;
 244 
 245   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
 246      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
 247      vcstatus->cnt++;
 248      foundLockUp = 0;
 249      if( vcstatus->cnt == 0x05 ) {
 250         abr_vc += vcc->vci;
 251         eabr_vc += vcc->vci;
 252         if( eabr_vc->last_desc ) {
 253            if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
 254               /* Wait for 10 Micro sec */
 255               udelay(10);
 256               if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
 257                  foundLockUp = 1;
 258            }
 259            else {
 260               tempCellSlot = abr_vc->last_cell_slot;
 261               tempFract    = abr_vc->fraction;
 262               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
 263                          && (tempFract == dev->testTable[vcc->vci]->fract))
 264                  foundLockUp = 1;                   
 265               dev->testTable[vcc->vci]->lastTime = tempCellSlot;   
 266               dev->testTable[vcc->vci]->fract = tempFract; 
 267            }        
 268         } /* last descriptor */            
 269         vcstatus->cnt = 0;      
 270      } /* vcstatus->cnt */
 271         
 272      if (foundLockUp) {
 273         IF_ABR(printk("LOCK UP found\n");) 
 274         writew(0xFFFD, dev->seg_reg+MODE_REG_0);
 275         /* Wait for 10 Micro sec */
 276         udelay(10); 
 277         abr_vc->status &= 0xFFF8;
 278         abr_vc->status |= 0x0001;  /* state is idle */
 279         shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;                
 280         for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
 281         if (i < dev->num_vc)
 282            shd_tbl[i] = vcc->vci;
 283         else
 284            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
 285         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
 286         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
 287         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);       
 288         vcstatus->cnt = 0;
 289      } /* foundLockUp */
 290 
 291   } /* if an ABR VC */
 292 
 293 
 294 }
 295  
 296 /*
 297 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
 298 **
 299 **  +----+----+------------------+-------------------------------+
 300 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
 301 **  +----+----+------------------+-------------------------------+
 302 ** 
 303 **    R = reserved (written as 0)
 304 **    NZ = 0 if 0 cells/sec; 1 otherwise
 305 **
 306 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
 307 */
 308 static u16
 309 cellrate_to_float(u32 cr)
 310 {
 311 
 312 #define NZ              0x4000
 313 #define M_BITS          9               /* Number of bits in mantissa */
 314 #define E_BITS          5               /* Number of bits in exponent */
 315 #define M_MASK          0x1ff           
 316 #define E_MASK          0x1f
 317   u16   flot;
 318   u32   tmp = cr & 0x00ffffff;
 319   int   i   = 0;
 320   if (cr == 0)
 321      return 0;
 322   while (tmp != 1) {
 323      tmp >>= 1;
 324      i++;
 325   }
 326   if (i == M_BITS)
 327      flot = NZ | (i << M_BITS) | (cr & M_MASK);
 328   else if (i < M_BITS)
 329      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
 330   else
 331      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
 332   return flot;
 333 }
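     /*
     ** Worked example (illustrative, not part of the original driver):
     ** encoding 353207 cells/sec (roughly the OC-3c cell rate) with the
     ** routine above.  The highest set bit of 353207 (0x563b7) is bit 18,
     ** so the exponent is 18 and, because 18 > M_BITS, the mantissa is
     ** (353207 >> (18 - 9)) & 0x1ff = 177, giving
     **
     **      flot = NZ | (18 << 9) | 177 = 0x64b1
     **
     ** which decodes back (cf. float_to_cellrate below) to
     ** (1 + 177/512) * 2^18 = 352768 cells/sec, i.e. the input rate rounded
     ** down to 9 bits of mantissa.
     */
     #if 0   /* illustrative sanity-check sketch only, never compiled */
     static void cellrate_float_example(void)
     {
           u16 flot = cellrate_to_float(353207);
           if (flot != 0x64b1)
                 printk("cellrate_to_float example mismatch: 0x%x\n", flot);
     }
     #endif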
 334 
 335 #if 0
 336 /*
 337 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
 338 */
 339 static u32
 340 float_to_cellrate(u16 rate)
 341 {
 342   u32   exp, mantissa, cps;
 343   if ((rate & NZ) == 0)
 344      return 0;
 345   exp = (rate >> M_BITS) & E_MASK;
 346   mantissa = rate & M_MASK;
 347   if (exp == 0)
 348      return 1;
 349   cps = (1 << M_BITS) | mantissa;
 350   if (exp == M_BITS)
 351      cps = cps;
 352   else if (exp > M_BITS)
 353      cps <<= (exp - M_BITS);
 354   else
 355      cps >>= (M_BITS - exp);
 356   return cps;
 357 }
 358 #endif 
 359 
 360 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
 361   srv_p->class_type = ATM_ABR;
 362   srv_p->pcr        = dev->LineRate;
 363   srv_p->mcr        = 0;
 364   srv_p->icr        = 0x055cb7;
 365   srv_p->tbe        = 0xffffff;
 366   srv_p->frtt       = 0x3a;
 367   srv_p->rif        = 0xf;
 368   srv_p->rdf        = 0xb;
 369   srv_p->nrm        = 0x4;
 370   srv_p->trm        = 0x7;
 371   srv_p->cdf        = 0x3;
 372   srv_p->adtf       = 50;
 373 }
 374 
 375 static int
 376 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, 
 377                                                 struct atm_vcc *vcc, u8 flag)
 378 {
 379   f_vc_abr_entry  *f_abr_vc;
 380   r_vc_abr_entry  *r_abr_vc;
 381   u32           icr;
 382   u8            trm, nrm, crm;
 383   u16           adtf, air, *ptr16;      
 384   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
 385   f_abr_vc += vcc->vci;       
 386   switch (flag) {
 387      case 1: /* FFRED initialization */
 388 #if 0  /* sanity check */
 389        if (srv_p->pcr == 0)
 390           return INVALID_PCR;
 391        if (srv_p->pcr > dev->LineRate)
 392           srv_p->pcr = dev->LineRate;
 393        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
 394           return MCR_UNAVAILABLE;
 395        if (srv_p->mcr > srv_p->pcr)
 396           return INVALID_MCR;
 397        if (!(srv_p->icr))
 398           srv_p->icr = srv_p->pcr;
 399        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
 400           return INVALID_ICR;
 401        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
 402           return INVALID_TBE;
 403        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
 404           return INVALID_FRTT;
 405        if (srv_p->nrm > MAX_NRM)
 406           return INVALID_NRM;
 407        if (srv_p->trm > MAX_TRM)
 408           return INVALID_TRM;
 409        if (srv_p->adtf > MAX_ADTF)
 410           return INVALID_ADTF;
 411        else if (srv_p->adtf == 0)
 412           srv_p->adtf = 1;
 413        if (srv_p->cdf > MAX_CDF)
 414           return INVALID_CDF;
 415        if (srv_p->rif > MAX_RIF)
 416           return INVALID_RIF;
 417        if (srv_p->rdf > MAX_RDF)
 418           return INVALID_RDF;
 419 #endif
 420        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
 421        f_abr_vc->f_vc_type = ABR;
 422        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
 423                                   /* i.e 2**n = 2 << (n-1) */
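            /* e.g. (illustrative, not from the original source): the
               init_abr_vc() default srv_p->nrm of 0x4 gives
               nrm = 2 << 4 = 32, i.e. the usual ABR Nrm of 32 cells
               per forward RM cell */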
 424        f_abr_vc->f_nrm = nrm << 8 | nrm;
 425        trm = 100000/(2 << (16 - srv_p->trm));
 426        if ( trm == 0) trm = 1;
 427        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
 428        crm = srv_p->tbe / nrm;
 429        if (crm == 0) crm = 1;
 430        f_abr_vc->f_crm = crm & 0xff;
 431        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
 432        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
 433                                 ((srv_p->tbe/srv_p->frtt)*1000000) :
 434                                 (1000000/(srv_p->frtt/srv_p->tbe)));
 435        f_abr_vc->f_icr = cellrate_to_float(icr);
 436        adtf = (10000 * srv_p->adtf)/8192;
 437        if (adtf == 0) adtf = 1; 
 438        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
 439        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
 440        f_abr_vc->f_acr = f_abr_vc->f_icr;
 441        f_abr_vc->f_status = 0x0042;
 442        break;
 443     case 0: /* RFRED initialization */  
 444        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); 
 445        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
 446        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
 447        r_abr_vc += vcc->vci;
 448        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
 449        air = srv_p->pcr << (15 - srv_p->rif);
 450        if (air == 0) air = 1;
 451        r_abr_vc->r_air = cellrate_to_float(air);
 452        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
 453        dev->sum_mcr        += srv_p->mcr;
 454        dev->n_abr++;
 455        break;
 456     default:
 457        break;
 458   }
 459   return        0;
 460 }
 461 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
 462    u32 rateLow=0, rateHigh, rate;
 463    int entries;
 464    struct ia_vcc *ia_vcc;
 465 
 466    int   idealSlot =0, testSlot, toBeAssigned, inc;
 467    u32   spacing;
 468    u16  *SchedTbl, *TstSchedTbl;
 469    u16  cbrVC, vcIndex;
 470    u32   fracSlot    = 0;
 471    u32   sp_mod      = 0;
 472    u32   sp_mod2     = 0;
 473 
 474    /* IpAdjustTrafficParams */
 475    if (vcc->qos.txtp.max_pcr <= 0) {
 476       IF_ERR(printk("PCR for CBR not defined\n");)
 477       return -1;
 478    }
 479    rate = vcc->qos.txtp.max_pcr;
 480    entries = rate / dev->Granularity;
 481    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
 482                                 entries, rate, dev->Granularity);)
 483    if (entries < 1)
 484       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) 
 485    rateLow  =  entries * dev->Granularity;
 486    rateHigh = (entries + 1) * dev->Granularity;
 487    if (3*(rate - rateLow) > (rateHigh - rate))
 488       entries++;
 489    if (entries > dev->CbrRemEntries) {
 490       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
 491       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
 492                                        entries, dev->CbrRemEntries);)
 493       return -EBUSY;
 494    }   
 495 
 496    ia_vcc = INPH_IA_VCC(vcc);
 497    ia_vcc->NumCbrEntry = entries; 
 498    dev->sum_mcr += entries * dev->Granularity; 
 499    /* IaFFrednInsertCbrSched */
 500    // Starting at an arbitrary location, place the entries into the table
 501    // as smoothly as possible
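        // Illustrative example with hypothetical numbers (not from the source):
        // with CbrTotEntries = 10 and entries = 4, spacing = 2 and sp_mod = 2,
        // so the ideal slots come out as E, E+2, E+4, E+7 (E = the entry
        // point); the remainder accumulates in sp_mod2 and occasionally adds
        // an extra slot, approximating the exact 10/4 = 2.5 slot spacing.
        // Each ideal slot is then probed alternately below and above until a
        // free table entry is found.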
 502    cbrVC   = 0;
 503    spacing = dev->CbrTotEntries / entries;
 504    sp_mod  = dev->CbrTotEntries % entries; // get modulo
 505    toBeAssigned = entries;
 506    fracSlot = 0;
 507    vcIndex  = vcc->vci;
 508    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
 509    while (toBeAssigned)
 510    {
 511       // If this is the first time, start the table loading for this connection
 512       // as close to entryPoint as possible.
 513       if (toBeAssigned == entries)
 514       {
 515          idealSlot = dev->CbrEntryPt;
 516          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
 517          if (dev->CbrEntryPt >= dev->CbrTotEntries) 
 518             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
 519       } else {
 520          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
 521          // in the table that would be  smoothest
 522          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
 523          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
 524       }
 525       if (idealSlot >= (int)dev->CbrTotEntries) 
 526          idealSlot -= dev->CbrTotEntries;  
 527       // Continuously check around this ideal value until a null
 528       // location is encountered.
 529       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); 
 530       inc = 0;
 531       testSlot = idealSlot;
 532       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
 533       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
 534                                 testSlot, TstSchedTbl,toBeAssigned);)
 535       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
 536       while (cbrVC)  // If another VC at this location, we have to keep looking
 537       {
 538           inc++;
 539           testSlot = idealSlot - inc;
 540           if (testSlot < 0) { // Wrap if necessary
 541              testSlot += dev->CbrTotEntries;
 542              IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
 543                                                        SchedTbl,testSlot);)
 544           }
 545           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
 546           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); 
 547           if (!cbrVC)
 548              break;
 549           testSlot = idealSlot + inc;
 550           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
 551              testSlot -= dev->CbrTotEntries;
 552              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
 553              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", 
 554                                             testSlot, toBeAssigned);)
 555           } 
 556           // set table index and read in value
 557           TstSchedTbl = (u16*)(SchedTbl + testSlot);
 558           IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
 559                           TstSchedTbl,cbrVC,inc);)
 560           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
 561        } /* while */
 562        // Move this VCI number into this location of the CBR Sched table.
 563        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
 564        dev->CbrRemEntries--;
 565        toBeAssigned--;
 566    } /* while */ 
 567 
 568    /* IaFFrednCbrEnable */
 569    dev->NumEnabledCBR++;
 570    if (dev->NumEnabledCBR == 1) {
 571        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
 572        IF_CBR(printk("CBR is enabled\n");)
 573    }
 574    return 0;
 575 }
 576 static void ia_cbrVc_close (struct atm_vcc *vcc) {
 577    IADEV *iadev;
 578    u16 *SchedTbl, NullVci = 0;
 579    u32 i, NumFound;
 580 
 581    iadev = INPH_IA_DEV(vcc->dev);
 582    iadev->NumEnabledCBR--;
 583    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
 584    if (iadev->NumEnabledCBR == 0) {
 585       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
 586       IF_CBR (printk("CBR support disabled\n");)
 587    }
 588    NumFound = 0;
 589    for (i=0; i < iadev->CbrTotEntries; i++)
 590    {
 591       if (*SchedTbl == vcc->vci) {
 592          iadev->CbrRemEntries++;
 593          *SchedTbl = NullVci;
 594          IF_CBR(NumFound++;)
 595       }
 596       SchedTbl++;   
 597    } 
 598    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
 599 }
 600 
 601 static int ia_avail_descs(IADEV *iadev) {
 602    int tmp = 0;
 603    ia_hack_tcq(iadev);
 604    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
 605       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
 606    else
 607       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
 608                    iadev->ffL.tcq_st) / 2;
 609    return tmp;
 610 }    
 611 
 612 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
 613 
 614 static int ia_que_tx (IADEV *iadev) { 
 615    struct sk_buff *skb;
 616    int num_desc;
 617    struct atm_vcc *vcc;
 618    num_desc = ia_avail_descs(iadev);
 619 
 620    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
 621       if (!(vcc = ATM_SKB(skb)->vcc)) {
 622          dev_kfree_skb_any(skb);
 623          printk("ia_que_tx: Null vcc\n");
 624          break;
 625       }
 626       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
 627          dev_kfree_skb_any(skb);
 628          printk("Free the SKB on closed vci %d \n", vcc->vci);
 629          break;
 630       }
 631       if (ia_pkt_tx (vcc, skb)) {
 632          skb_queue_head(&iadev->tx_backlog, skb);
 633       }
 634       num_desc--;
 635    }
 636    return 0;
 637 }
 638 
 639 static void ia_tx_poll (IADEV *iadev) {
 640    struct atm_vcc *vcc = NULL;
 641    struct sk_buff *skb = NULL, *skb1 = NULL;
 642    struct ia_vcc *iavcc;
 643    IARTN_Q *  rtne;
 644 
 645    ia_hack_tcq(iadev);
 646    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
 647        skb = rtne->data.txskb;
 648        if (!skb) {
 649            printk("ia_tx_poll: skb is null\n");
 650            goto out;
 651        }
 652        vcc = ATM_SKB(skb)->vcc;
 653        if (!vcc) {
 654            printk("ia_tx_poll: vcc is null\n");
 655            dev_kfree_skb_any(skb);
 656            goto out;
 657        }
 658 
 659        iavcc = INPH_IA_VCC(vcc);
 660        if (!iavcc) {
 661            printk("ia_tx_poll: iavcc is null\n");
 662            dev_kfree_skb_any(skb);
 663            goto out;
 664        }
 665 
 666        skb1 = skb_dequeue(&iavcc->txing_skb);
 667        while (skb1 && (skb1 != skb)) {
 668           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
 669              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
 670           }
 671           IF_ERR(printk("Release the SKB not match\n");)
 672           if ((vcc->pop) && (skb1->len != 0))
 673           {
 674              vcc->pop(vcc, skb1);
 675              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
 676                                                           (long)skb1);)
 677           }
 678           else 
 679              dev_kfree_skb_any(skb1);
 680           skb1 = skb_dequeue(&iavcc->txing_skb);
 681        }                                                        
 682        if (!skb1) {
 683           IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
 684           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
 685           break;
 686        }
 687        if ((vcc->pop) && (skb->len != 0))
 688        {
 689           vcc->pop(vcc, skb);
 690           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
 691        }
 692        else 
 693           dev_kfree_skb_any(skb);
 694        kfree(rtne);
 695     }
 696     ia_que_tx(iadev);
 697 out:
 698     return;
 699 }
 700 #if 0
 701 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
 702 {
 703         u32     t;
 704         int     i;
 705         /*
 706          * Issue a command to enable writes to the NOVRAM
 707          */
 708         NVRAM_CMD (EXTEND + EWEN);
 709         NVRAM_CLR_CE;
 710         /*
 711          * issue the write command
 712          */
 713         NVRAM_CMD(IAWRITE + addr);
 714         /* 
 715          * Send the data, starting with D15, then D14, and so on for 16 bits
 716          */
 717         for (i=15; i>=0; i--) {
 718                 NVRAM_CLKOUT (val & 0x8000);
 719                 val <<= 1;
 720         }
 721         NVRAM_CLR_CE;
 722         CFG_OR(NVCE);
 723         t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
 724         while (!(t & NVDO))
 725                 t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); 
 726 
 727         NVRAM_CLR_CE;
 728         /*
 729          * disable writes again
 730          */
 731         NVRAM_CMD(EXTEND + EWDS)
 732         NVRAM_CLR_CE;
 733         CFG_AND(~NVDI);
 734 }
 735 #endif
 736 
 737 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
 738 {
 739         u_short val;
 740         u32     t;
 741         int     i;
 742         /*
 743          * Read the first bit that was clocked with the falling edge of the
 744          * the last command data clock
 745          */
 746         NVRAM_CMD(IAREAD + addr);
 747         /*
 748          * Now read the rest of the bits, the next bit read is D14, then D13,
 749          * and so on.
 750          */
 751         val = 0;
 752         for (i=15; i>=0; i--) {
 753                 NVRAM_CLKIN(t);
 754                 val |= (t << i);
 755         }
 756         NVRAM_CLR_CE;
 757         CFG_AND(~NVDI);
 758         return val;
 759 }
 760 
 761 static void ia_hw_type(IADEV *iadev) {
 762    u_short memType = ia_eeprom_get(iadev, 25);   
 763    iadev->memType = memType;
 764    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
 765       iadev->num_tx_desc = IA_TX_BUF;
 766       iadev->tx_buf_sz = IA_TX_BUF_SZ;
 767       iadev->num_rx_desc = IA_RX_BUF;
 768       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
 769    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
 770       if (IA_TX_BUF == DFL_TX_BUFFERS)
 771         iadev->num_tx_desc = IA_TX_BUF / 2;
 772       else 
 773         iadev->num_tx_desc = IA_TX_BUF;
 774       iadev->tx_buf_sz = IA_TX_BUF_SZ;
 775       if (IA_RX_BUF == DFL_RX_BUFFERS)
 776         iadev->num_rx_desc = IA_RX_BUF / 2;
 777       else
 778         iadev->num_rx_desc = IA_RX_BUF;
 779       iadev->rx_buf_sz = IA_RX_BUF_SZ;
 780    }
 781    else {
 782       if (IA_TX_BUF == DFL_TX_BUFFERS) 
 783         iadev->num_tx_desc = IA_TX_BUF / 8;
 784       else
 785         iadev->num_tx_desc = IA_TX_BUF;
 786       iadev->tx_buf_sz = IA_TX_BUF_SZ;
 787       if (IA_RX_BUF == DFL_RX_BUFFERS)
 788         iadev->num_rx_desc = IA_RX_BUF / 8;
 789       else
 790         iadev->num_rx_desc = IA_RX_BUF;
 791       iadev->rx_buf_sz = IA_RX_BUF_SZ; 
 792    } 
 793    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); 
 794    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
 795          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
 796          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
 797 
 798 #if 0
 799    if ((memType & FE_MASK) == FE_SINGLE_MODE) {
 800       iadev->phy_type = PHY_OC3C_S;
 801    else if ((memType & FE_MASK) == FE_UTP_OPTION)
 802       iadev->phy_type = PHY_UTP155;
 803    else
 804      iadev->phy_type = PHY_OC3C_M;
 805 #endif
 806    
 807    iadev->phy_type = memType & FE_MASK;
 808    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", 
 809                                          memType,iadev->phy_type);)
 810    if (iadev->phy_type == FE_25MBIT_PHY) 
 811       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
 812    else if (iadev->phy_type == FE_DS3_PHY)
 813       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
 814    else if (iadev->phy_type == FE_E3_PHY) 
 815       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
 816    else
 817        iadev->LineRate = (u32)(ATM_OC3_PCR);
 818    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
 819 
 820 }
 821 
 822 static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
 823 {
 824         return readl(ia->phy + (reg >> 2));
 825 }
 826 
 827 static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
 828 {
 829         writel(val, ia->phy + (reg >> 2));
 830 }
 831 
 832 static void ia_frontend_intr(struct iadev_priv *iadev)
 833 {
 834         u32 status;
 835 
 836         if (iadev->phy_type & FE_25MBIT_PHY) {
 837                 status = ia_phy_read32(iadev, MB25_INTR_STATUS);
 838                 iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
 839         } else if (iadev->phy_type & FE_DS3_PHY) {
 840                 ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
 841                 status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
 842                 iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
 843         } else if (iadev->phy_type & FE_E3_PHY) {
 844                 ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
 845                 status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
 846                 iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
 847         } else {
 848                 status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
 849                 iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
 850         }
 851 
 852         printk(KERN_INFO "IA: SUNI carrier %s\n",
 853                 iadev->carrier_detect ? "detected" : "lost signal");
 854 }
 855 
 856 static void ia_mb25_init(struct iadev_priv *iadev)
 857 {
 858 #if 0
 859    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
 860 #endif
 861         ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
 862         ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
 863 
 864         iadev->carrier_detect =
 865                 (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
 866 }
 867 
 868 struct ia_reg {
 869         u16 reg;
 870         u16 val;
 871 };
 872 
 873 static void ia_phy_write(struct iadev_priv *iadev,
 874                          const struct ia_reg *regs, int len)
 875 {
 876         while (len--) {
 877                 ia_phy_write32(iadev, regs->reg, regs->val);
 878                 regs++;
 879         }
 880 }
 881 
 882 static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
 883 {
 884         static const struct ia_reg suni_ds3_init[] = {
 885                 { SUNI_DS3_FRM_INTR_ENBL,       0x17 },
 886                 { SUNI_DS3_FRM_CFG,             0x01 },
 887                 { SUNI_DS3_TRAN_CFG,            0x01 },
 888                 { SUNI_CONFIG,                  0 },
 889                 { SUNI_SPLR_CFG,                0 },
 890                 { SUNI_SPLT_CFG,                0 }
 891         };
 892         u32 status;
 893 
 894         status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
 895         iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
 896 
 897         ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
 898 }
 899 
 900 static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
 901 {
 902         static const struct ia_reg suni_e3_init[] = {
 903                 { SUNI_E3_FRM_FRAM_OPTIONS,             0x04 },
 904                 { SUNI_E3_FRM_MAINT_OPTIONS,            0x20 },
 905                 { SUNI_E3_FRM_FRAM_INTR_ENBL,           0x1d },
 906                 { SUNI_E3_FRM_MAINT_INTR_ENBL,          0x30 },
 907                 { SUNI_E3_TRAN_STAT_DIAG_OPTIONS,       0 },
 908                 { SUNI_E3_TRAN_FRAM_OPTIONS,            0x01 },
 909                 { SUNI_CONFIG,                          SUNI_PM7345_E3ENBL },
 910                 { SUNI_SPLR_CFG,                        0x41 },
 911                 { SUNI_SPLT_CFG,                        0x41 }
 912         };
 913         u32 status;
 914 
 915         status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
 916         iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
 917         ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
 918 }
 919 
 920 static void ia_suni_pm7345_init(struct iadev_priv *iadev)
 921 {
 922         static const struct ia_reg suni_init[] = {
 923                 /* Enable RSOP loss of signal interrupt. */
 924                 { SUNI_INTR_ENBL,               0x28 },
 925                 /* Clear error counters. */
 926                 { SUNI_ID_RESET,                0 },
 927                 /* Clear "PMCTST" in master test register. */
 928                 { SUNI_MASTER_TEST,             0 },
 929 
 930                 { SUNI_RXCP_CTRL,               0x2c },
 931                 { SUNI_RXCP_FCTRL,              0x81 },
 932 
 933                 { SUNI_RXCP_IDLE_PAT_H1,        0 },
 934                 { SUNI_RXCP_IDLE_PAT_H2,        0 },
 935                 { SUNI_RXCP_IDLE_PAT_H3,        0 },
 936                 { SUNI_RXCP_IDLE_PAT_H4,        0x01 },
 937 
 938                 { SUNI_RXCP_IDLE_MASK_H1,       0xff },
 939                 { SUNI_RXCP_IDLE_MASK_H2,       0xff },
 940                 { SUNI_RXCP_IDLE_MASK_H3,       0xff },
 941                 { SUNI_RXCP_IDLE_MASK_H4,       0xfe },
 942 
 943                 { SUNI_RXCP_CELL_PAT_H1,        0 },
 944                 { SUNI_RXCP_CELL_PAT_H2,        0 },
 945                 { SUNI_RXCP_CELL_PAT_H3,        0 },
 946                 { SUNI_RXCP_CELL_PAT_H4,        0x01 },
 947 
 948                 { SUNI_RXCP_CELL_MASK_H1,       0xff },
 949                 { SUNI_RXCP_CELL_MASK_H2,       0xff },
 950                 { SUNI_RXCP_CELL_MASK_H3,       0xff },
 951                 { SUNI_RXCP_CELL_MASK_H4,       0xff },
 952 
 953                 { SUNI_TXCP_CTRL,               0xa4 },
 954                 { SUNI_TXCP_INTR_EN_STS,        0x10 },
 955                 { SUNI_TXCP_IDLE_PAT_H5,        0x55 }
 956         };
 957 
 958         if (iadev->phy_type & FE_DS3_PHY)
 959                 ia_suni_pm7345_init_ds3(iadev);
 960         else
 961                 ia_suni_pm7345_init_e3(iadev);
 962 
 963         ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
 964 
 965         ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
 966                 ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
 967                   SUNI_PM7345_DLB | SUNI_PM7345_PLB));
 968 #ifdef __SNMP__
 969    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
 970 #endif /* __SNMP__ */
 971    return;
 972 }
 973 
 974 
 975 /***************************** IA_LIB END *****************************/
 976     
 977 #ifdef CONFIG_ATM_IA_DEBUG
 978 static int tcnter = 0;
 979 static void xdump( u_char*  cp, int  length, char*  prefix )
 980 {
 981     int col, count;
 982     u_char prntBuf[120];
 983     u_char*  pBuf = prntBuf;
 984     count = 0;
 985     while(count < length){
 986         pBuf += sprintf( pBuf, "%s", prefix );
 987         for(col = 0;count + col < length && col < 16; col++){
 988             if (col != 0 && (col % 4) == 0)
 989                 pBuf += sprintf( pBuf, " " );
 990             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
 991         }
 992         while(col++ < 16){      /* pad end of buffer with blanks */
 993             if ((col % 4) == 0)
 994                 sprintf( pBuf, " " );
 995             pBuf += sprintf( pBuf, "   " );
 996         }
 997         pBuf += sprintf( pBuf, "  " );
 998         for(col = 0;count + col < length && col < 16; col++){
 999             if (isprint((int)cp[count + col]))
1000                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
1001             else
1002                 pBuf += sprintf( pBuf, "." );
1003                 }
1004         printk("%s\n", prntBuf);
1005         count += col;
1006         pBuf = prntBuf;
1007     }
1008 
1009 }  /* close xdump(... */
1010 #endif /* CONFIG_ATM_IA_DEBUG */
1011 
1012   
1013 static struct atm_dev *ia_boards = NULL;  
1014   
1015 #define ACTUAL_RAM_BASE \
1016         RAM_BASE*((iadev->mem)/(128 * 1024))  
1017 #define ACTUAL_SEG_RAM_BASE \
1018         IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1019 #define ACTUAL_REASS_RAM_BASE \
1020         IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))  
1021   
1022   
1023 /*-- some utilities and memory allocation stuff will come here -------------*/  
1024   
1025 static void desc_dbg(IADEV *iadev) {
1026 
1027   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1028   u32 i;
1029   void __iomem *tmp;
1030   // regval = readl((u32)ia_cmds->maddr);
1031   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1032   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1033                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1034                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1035   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr, 
1036                    iadev->ffL.tcq_rd);
1037   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1038   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1039   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1040   i = 0;
1041   while (tcq_st_ptr != tcq_ed_ptr) {
1042       tmp = iadev->seg_ram+tcq_st_ptr;
1043       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1044       tcq_st_ptr += 2;
1045   }
1046   for(i=0; i <iadev->num_tx_desc; i++)
1047       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1048 } 
1049   
1050   
1051 /*----------------------------- Receiving side stuff --------------------------*/  
1052  
1053 static void rx_excp_rcvd(struct atm_dev *dev)  
1054 {  
1055 #if 0 /* closing the receiving side will cause too many excp int */  
1056   IADEV *iadev;  
1057   u_short state;  
1058   u_short excpq_rd_ptr;  
1059   //u_short *ptr;  
1060   int vci, error = 1;  
1061   iadev = INPH_IA_DEV(dev);  
1062   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1063   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)  
1064   { printk("state = %x \n", state); 
1065         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;  
1066  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); 
1067         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1068             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1069         // TODO: update exception stat
1070         vci = readw(iadev->reass_ram+excpq_rd_ptr);  
1071         error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;  
1072         // pwang_test
1073         excpq_rd_ptr += 4;  
1074         if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))  
1075             excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1076         writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);  
1077         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1078   }  
1079 #endif
1080 }  
1081   
1082 static void free_desc(struct atm_dev *dev, int desc)  
1083 {  
1084         IADEV *iadev;  
1085         iadev = INPH_IA_DEV(dev);  
1086         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); 
1087         iadev->rfL.fdq_wr +=2;
1088         if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1089                 iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;  
1090         writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);  
1091 }  
1092   
1093   
1094 static int rx_pkt(struct atm_dev *dev)  
1095 {  
1096         IADEV *iadev;  
1097         struct atm_vcc *vcc;  
1098         unsigned short status;  
1099         struct rx_buf_desc __iomem *buf_desc_ptr;  
1100         int desc;   
1101         struct dle* wr_ptr;  
1102         int len;  
1103         struct sk_buff *skb;  
1104         u_int buf_addr, dma_addr;  
1105 
1106         iadev = INPH_IA_DEV(dev);  
1107         if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) 
1108         {  
1109             printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);  
1110             return -EINVAL;  
1111         }  
1112         /* mask 1st 3 bits to get the actual descno. */  
1113         desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;  
1114         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", 
1115                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1116               printk(" pcq_wr_ptr = 0x%x\n",
1117                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1118         /* update the read pointer - maybe we should do this at the end */  
1119         if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) 
1120                 iadev->rfL.pcq_rd = iadev->rfL.pcq_st;  
1121         else  
1122                 iadev->rfL.pcq_rd += 2;
1123         writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);  
1124   
1125         /* get the buffer desc entry.  
1126                 update stuff. - doesn't seem to be any update necessary  
1127         */  
1128         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1129         /* make the ptr point to the corresponding buffer desc entry */  
1130         buf_desc_ptr += desc;     
1131         if (!desc || (desc > iadev->num_rx_desc) || 
1132                       ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
1133             free_desc(dev, desc);
1134             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1135             return -1;
1136         }
1137         vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];  
1138         if (!vcc)  
1139         {      
1140                 free_desc(dev, desc); 
1141                 printk("IA: null vcc, drop PDU\n");  
1142                 return -1;  
1143         }  
1144           
1145   
1146         /* might want to check the status bits for errors */  
1147         status = (u_short) (buf_desc_ptr->desc_mode);  
1148         if (status & (RX_CER | RX_PTE | RX_OFL))  
1149         {  
1150                 atomic_inc(&vcc->stats->rx_err);
1151                 IF_ERR(printk("IA: bad packet, dropping it");)  
1152                 if (status & RX_CER) { 
1153                     IF_ERR(printk(" cause: packet CRC error\n");)
1154                 }
1155                 else if (status & RX_PTE) {
1156                     IF_ERR(printk(" cause: packet time out\n");)
1157                 }
1158                 else {
1159                     IF_ERR(printk(" cause: buffer overflow\n");)
1160                 }
1161                 goto out_free_desc;
1162         }  
1163   
1164         /*  
1165                 build DLE.        
1166         */  
1167   
1168         buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;  
1169         dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;  
1170         len = dma_addr - buf_addr;  
1171         if (len > iadev->rx_buf_sz) {
1172            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1173            atomic_inc(&vcc->stats->rx_err);
1174            goto out_free_desc;
1175         }
1176                   
1177         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1178            if (vcc->vci < 32)
1179               printk("Drop control packets\n");
1180            goto out_free_desc;
1181         }
1182         skb_put(skb,len);  
1183         // pwang_test
1184         ATM_SKB(skb)->vcc = vcc;
1185         ATM_DESC(skb) = desc;        
1186         skb_queue_tail(&iadev->rx_dma_q, skb);  
1187 
1188         /* Build the DLE structure */  
1189         wr_ptr = iadev->rx_dle_q.write;  
1190         wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
1191                                               len, DMA_FROM_DEVICE);
1192         wr_ptr->local_pkt_addr = buf_addr;  
1193         wr_ptr->bytes = len;    /* We don't know this do we ?? */  
1194         wr_ptr->mode = DMA_INT_ENABLE;  
1195   
1196         /* should take care of wrap around here too. */  
1197         if(++wr_ptr == iadev->rx_dle_q.end)
1198              wr_ptr = iadev->rx_dle_q.start;
1199         iadev->rx_dle_q.write = wr_ptr;  
1200         udelay(1);  
1201         /* Increment transaction counter */  
1202         writel(1, iadev->dma+IPHASE5575_RX_COUNTER);   
1203 out:    return 0;  
1204 out_free_desc:
1205         free_desc(dev, desc);
1206         goto out;
1207 }  
1208   
1209 static void rx_intr(struct atm_dev *dev)  
1210 {  
1211   IADEV *iadev;  
1212   u_short status;  
1213   u_short state, i;  
1214   
1215   iadev = INPH_IA_DEV(dev);  
1216   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;  
1217   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1218   if (status & RX_PKT_RCVD)  
1219   {  
1220         /* do something */  
1221         /* Basically recvd an interrupt for receiving a packet.  
1222         A descriptor would have been written to the packet complete   
1223         queue. Get all the descriptors and set up dma to move the   
1224         packets till the packet complete queue is empty..  
1225         */  
1226         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1227         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) 
1228         while(!(state & PCQ_EMPTY))  
1229         {  
1230              rx_pkt(dev);  
1231              state = readl(iadev->reass_reg + STATE_REG) & 0xffff;  
1232         }  
1233         iadev->rxing = 1;
1234   }  
1235   if (status & RX_FREEQ_EMPT)  
1236   {   
1237      if (iadev->rxing) {
1238         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1239         iadev->rx_tmp_jif = jiffies; 
1240         iadev->rxing = 0;
1241      } 
1242      else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
1243                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1244         for (i = 1; i <= iadev->num_rx_desc; i++)
1245                free_desc(dev, i);
1246 printk("Test logic RUN!!!!\n");
1247         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1248         iadev->rxing = 1;
1249      }
1250      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)  
1251   }  
1252 
1253   if (status & RX_EXCP_RCVD)  
1254   {  
1255         /* probably need to handle the exception queue also. */  
1256         IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)  
1257         rx_excp_rcvd(dev);  
1258   }  
1259 
1260 
1261   if (status & RX_RAW_RCVD)  
1262   {  
1263         /* need to handle the raw incoming cells. This depends on   
1264         whether we have programmed to receive the raw cells or not.  
1265         Else ignore. */  
1266         IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)  
1267   }  
1268 }  
1269   
1270   
1271 static void rx_dle_intr(struct atm_dev *dev)  
1272 {  
1273   IADEV *iadev;  
1274   struct atm_vcc *vcc;   
1275   struct sk_buff *skb;  
1276   int desc;  
1277   u_short state;   
1278   struct dle *dle, *cur_dle;  
1279   u_int dle_lp;  
1280   int len;
1281   iadev = INPH_IA_DEV(dev);  
1282  
1283   /* free all the dles done, that is just update our own dle read pointer   
1284         - do we really need to do this. Think not. */  
1285   /* DMA is done, just get all the receive buffers from the rx dma queue  
1286         and push them up to the higher layer protocol. Also free the desc  
1287         associated with the buffer. */  
1288   dle = iadev->rx_dle_q.read;  
1289   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);  
1290   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));  
1291   while(dle != cur_dle)  
1292   {  
1293       /* free the DMAed skb */  
1294       skb = skb_dequeue(&iadev->rx_dma_q);  
1295       if (!skb)  
1296          goto INCR_DLE;
1297       desc = ATM_DESC(skb);
1298       free_desc(dev, desc);  
1299                
1300       if (!(len = skb->len))
1301       {  
1302           printk("rx_dle_intr: skb len 0\n");  
1303           dev_kfree_skb_any(skb);  
1304       }  
1305       else  
1306       {  
1307           struct cpcs_trailer *trailer;
1308           u_short length;
1309           struct ia_vcc *ia_vcc;
1310 
1311           dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
1312                            len, DMA_FROM_DEVICE);
1313           /* no VCC related housekeeping done as yet. lets see */  
1314           vcc = ATM_SKB(skb)->vcc;
1315           if (!vcc) {
1316               printk("IA: null vcc\n");  
1317               dev_kfree_skb_any(skb);
1318               goto INCR_DLE;
1319           }
1320           ia_vcc = INPH_IA_VCC(vcc);
1321           if (ia_vcc == NULL)
1322           {
1323              atomic_inc(&vcc->stats->rx_err);
1324              atm_return(vcc, skb->truesize);
1325              dev_kfree_skb_any(skb);
1326              goto INCR_DLE;
1327            }
1328           // get real pkt length  pwang_test
1329           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1330                                  skb->len - sizeof(*trailer));
1331           length = swap_byte_order(trailer->length);
1332           if ((length > iadev->rx_buf_sz) || (length > 
1333                               (skb->len - sizeof(struct cpcs_trailer))))
1334           {
1335              atomic_inc(&vcc->stats->rx_err);
1336              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)", 
1337                                                             length, skb->len);)
1338              atm_return(vcc, skb->truesize);
1339              dev_kfree_skb_any(skb);
1340              goto INCR_DLE;
1341           }
1342           skb_trim(skb, length);
1343           
1344           /* Display the packet */  
1345           IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);  
1346           xdump(skb->data, skb->len, "RX: ");
1347           printk("\n");)
1348 
1349           IF_RX(printk("rx_dle_intr: skb push");)  
1350           vcc->push(vcc,skb);  
1351           atomic_inc(&vcc->stats->rx);
1352           iadev->rx_pkt_cnt++;
1353       }  
1354 INCR_DLE:
1355       if (++dle == iadev->rx_dle_q.end)  
1356           dle = iadev->rx_dle_q.start;  
1357   }  
1358   iadev->rx_dle_q.read = dle;  
1359   
1360   /* if the interrupts are masked because there were no free desc available,  
1361                 unmask them now. */ 
1362   if (!iadev->rxing) {
1363      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1364      if (!(state & FREEQ_EMPTY)) {
1365         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1366         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1367                                       iadev->reass_reg+REASS_MASK_REG);
1368         iadev->rxing++; 
1369      }
1370   }
1371 }  
1372   
1373   
1374 static int open_rx(struct atm_vcc *vcc)  
1375 {  
1376         IADEV *iadev;  
1377         u_short __iomem *vc_table;  
1378         u_short __iomem *reass_ptr;  
1379         IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1380 
1381         if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;    
1382         iadev = INPH_IA_DEV(vcc->dev);  
1383         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {  
1384            if (iadev->phy_type & FE_25MBIT_PHY) {
1385                printk("IA: ABR not supported\n");
1386                return -EINVAL; 
1387            }
1388         }
1389         /* Make only this VCI in the vc table valid and let all   
1390                 others be invalid entries */  
1391         vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1392         vc_table += vcc->vci;
1393         /* mask the last 6 bits and OR it with 3 for 1K VCs */  
1394 
1395         *vc_table = vcc->vci << 6;
1396         /* Also keep a list of open rx vcs so that we can attach them with  
1397                 incoming PDUs later. */  
1398         if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || 
1399                                 (vcc->qos.txtp.traffic_class == ATM_ABR))  
1400         {  
1401                 srv_cls_param_t srv_p;
1402                 init_abr_vc(iadev, &srv_p);
1403                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1404         } 
1405         else {  /* for UBR; may need to add CBR logic later */
1406                 reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1407                 reass_ptr += vcc->vci;
1408                 *reass_ptr = NO_AAL5_PKT;
1409         }
1410         
1411         if (iadev->rx_open[vcc->vci])  
1412                 printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",  
1413                         vcc->dev->number, vcc->vci);  
1414         iadev->rx_open[vcc->vci] = vcc;  
1415         return 0;  
1416 }  
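/*
 * [Editorial sketch; not part of the driver build.]  The RX VC table
 * entry written in open_rx() above packs the VCI into the upper bits
 * and uses the low bits as a status field: open_rx() stores (vci << 6)
 * for an open VC, while rx_init() and ia_close() store (vci << 6) | 15
 * to mark the slot invalid.  A hypothetical pair of helpers expressing
 * that encoding:
 */
#if 0   /* illustrative only */
#define RX_VC_ENTRY_INVALID     0x000f  /* low bits = 15 => no such VC */

static u16 rx_vc_entry_open(u16 vci)
{
        return vci << 6;                        /* status bits cleared */
}

static u16 rx_vc_entry_closed(u16 vci)
{
        return (vci << 6) | RX_VC_ENTRY_INVALID;
}
#endif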
1417   
1418 static int rx_init(struct atm_dev *dev)  
1419 {  
1420         IADEV *iadev;  
1421         struct rx_buf_desc __iomem *buf_desc_ptr;  
1422         unsigned long rx_pkt_start = 0;  
1423         void *dle_addr;  
1424         struct abr_vc_table  *abr_vc_table; 
1425         u16 *vc_table;  
1426         u16 *reass_table;  
1427         int i,j, vcsize_sel;  
1428         u_short freeq_st_adr;  
1429         u_short *freeq_start;  
1430   
1431         iadev = INPH_IA_DEV(dev);  
1432   //    spin_lock_init(&iadev->rx_lock); 
1433   
1434         /* Allocate 4k bytes - more aligned than needed (4k boundary) */
1435         dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1436                                       &iadev->rx_dle_dma, GFP_KERNEL);
1437         if (!dle_addr)  {  
1438                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1439                 goto err_out;
1440         }
1441         iadev->rx_dle_q.start = (struct dle *)dle_addr;
1442         iadev->rx_dle_q.read = iadev->rx_dle_q.start;  
1443         iadev->rx_dle_q.write = iadev->rx_dle_q.start;  
1444         iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1445         /* the end of the dle q points to the entry after the last  
1446         DLE that can be used. */  
1447   
1448         /* write the upper 20 bits of the start address to rx list address register */  
1449         /* We know this is 32bit bus addressed so the following is safe */
1450         writel(iadev->rx_dle_dma & 0xfffff000,
1451                iadev->dma + IPHASE5575_RX_LIST_ADDR);  
1452         IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1453                       iadev->dma+IPHASE5575_TX_LIST_ADDR,
1454                       readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
1455         printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1456                       iadev->dma+IPHASE5575_RX_LIST_ADDR,
1457                       readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
1458   
1459         writew(0xffff, iadev->reass_reg+REASS_MASK_REG);  
1460         writew(0, iadev->reass_reg+MODE_REG);  
1461         writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);  
1462   
1463         /* Receive side control memory map  
1464            -------------------------------  
1465   
1466                 Buffer descr    0x0000 (736 - 23K)  
1467                 VP Table        0x5c00 (256 - 512)  
1468                 Except q        0x5e00 (128 - 512)  
1469                 Free buffer q   0x6000 (1K - 2K)  
1470                 Packet comp q   0x6800 (1K - 2K)  
1471                 Reass Table     0x7000 (1K - 2K)  
1472                 VC Table        0x7800 (1K - 2K)  
1473                 ABR VC Table    0x8000 (1K - 32K)  
1474         */  
1475           
1476         /* Base address for Buffer Descriptor Table */  
1477         writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);  
1478         /* Set the buffer size register */  
1479         writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);  
1480   
1481         /* Initialize each entry in the Buffer Descriptor Table */  
1482         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1483         buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1484         memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1485         buf_desc_ptr++;  
1486         rx_pkt_start = iadev->rx_pkt_ram;  
1487         for(i=1; i<=iadev->num_rx_desc; i++)  
1488         {  
1489                 memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1490                 buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;  
1491                 buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;  
1492                 buf_desc_ptr++;           
1493                 rx_pkt_start += iadev->rx_buf_sz;  
1494         }  
1495         IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1496         i = FREE_BUF_DESC_Q*iadev->memSize; 
1497         writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE); 
1498         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1499         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1500                                          iadev->reass_reg+FREEQ_ED_ADR);
1501         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1502         writew(i+iadev->num_rx_desc*sizeof(u_short), 
1503                                         iadev->reass_reg+FREEQ_WR_PTR);    
1504         /* Fill the FREEQ with all the free descriptors. */  
1505         freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);  
1506         freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);  
1507         for(i=1; i<=iadev->num_rx_desc; i++)  
1508         {  
1509                 *freeq_start = (u_short)i;  
1510                 freeq_start++;  
1511         }  
1512         IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1513         /* Packet Complete Queue */
1514         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1515         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1516         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1517         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1518         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1519 
1520         /* Exception Queue */
1521         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1522         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1523         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), 
1524                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1525         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1526         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); 
1527  
1528         /* Load local copy of FREEQ and PCQ ptrs */
1529         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1530         iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1531         iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1532         iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1533         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1534         iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1535         iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1536         iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1537         
1538         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", 
1539               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, 
1540               iadev->rfL.pcq_wr);)                
1541         /* just for check - no VP TBL */  
1542         /* VP Table */  
1543         /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */  
1544         /* initialize VP Table for invalid VPIs  
1545                 - I guess we can write all 1s or 0x000f in the entire memory  
1546                   space or something similar.  
1547         */  
1548   
1549         /* This seems to work and looks right to me too !!! */  
1550         i =  REASS_TABLE * iadev->memSize;
1551         writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);   
1552         /* initialize every Reassembly table entry to NO_AAL5_PKT */  
1553         reass_table = (u16 *)(iadev->reass_ram+i);  
1554         j = REASS_TABLE_SZ * iadev->memSize;
1555         for(i=0; i < j; i++)  
1556                 *reass_table++ = NO_AAL5_PKT;  
1557        i = 8*1024;
1558        vcsize_sel =  0;
1559        while (i != iadev->num_vc) {
1560           i /= 2;
1561           vcsize_sel++;
1562        }
1563        i = RX_VC_TABLE * iadev->memSize;
1564        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1565        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);  
1566         j = RX_VC_TABLE_SZ * iadev->memSize;
1567         for(i = 0; i < j; i++)  
1568         {  
1569                 /* shift the reassembly pointer by 3 + lower 3 bits of   
1570                 vc_lkup_base register (=3 for 1K VCs) and the last byte   
1571                 is those low 3 bits.   
1572                 Shall program this later.  
1573                 */  
1574                 *vc_table = (i << 6) | 15;      /* for invalid VCI */  
1575                 vc_table++;  
1576         }  
1577         /* ABR VC table */
1578         i =  ABR_VC_TABLE * iadev->memSize;
1579         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1580                    
1581         i = ABR_VC_TABLE * iadev->memSize;
1582         abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);  
1583         j = REASS_TABLE_SZ * iadev->memSize;
1584         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1585         for(i = 0; i < j; i++) {                
1586                 abr_vc_table->rdf = 0x0003;
1587                 abr_vc_table->air = 0x5eb1;
1588                 abr_vc_table++;         
1589         }  
1590 
1591         /* Initialize other registers */  
1592   
1593         /* VP Filter Register set for VC Reassembly only */  
1594         writew(0xff00, iadev->reass_reg+VP_FILTER);  
1595         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1596         writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1597 
1598         /* Packet Timeout Count  related Registers : 
1599            Set packet timeout to occur in about 3 seconds
1600            Set Packet Aging Interval count register to overflow in about 4 us
1601         */  
1602         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1603 
1604         i = (j >> 6) & 0xFF;
1605         j += 2 * (j - 1);
1606         i |= ((j << 2) & 0xFF00);
1607         writew(i, iadev->reass_reg+TMOUT_RANGE);
1608 
1609         /* initialize the desc_tbl */
1610         for(i=0; i<iadev->num_tx_desc;i++)
1611             iadev->desc_tbl[i].timestamp = 0;
1612 
1613         /* to clear the interrupt status register - read it */  
1614         readw(iadev->reass_reg+REASS_INTR_STATUS_REG);   
1615   
1616         /* Mask Register - clear it */  
1617         writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);  
1618   
1619         skb_queue_head_init(&iadev->rx_dma_q);  
1620         iadev->rx_free_desc_qhead = NULL;   
1621 
1622         iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
1623         if (!iadev->rx_open) {
1624                 printk(KERN_ERR DEV_LABEL "(itf %d) couldn't allocate rx_open table\n",
1625                        dev->number);  
1626                 goto err_free_dle;
1627         }  
1628 
1629         iadev->rxing = 1;
1630         iadev->rx_pkt_cnt = 0;
1631         /* Mode Register */  
1632         writew(R_ONLINE, iadev->reass_reg+MODE_REG);  
1633         return 0;  
1634 
1635 err_free_dle:
1636         dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1637                           iadev->rx_dle_dma);
1638 err_out:
1639         return -ENOMEM;
1640 }  
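/*
 * [Editorial sketch; not part of the driver build.]  The vcsize_sel
 * loops in rx_init() above and tx_init() below count how many times
 * the 8K-VC maximum must be halved to reach num_vc, i.e. they compute
 * log2(8192 / num_vc): 1 for a 4K-VC board, 3 for a 1K-VC board.  The
 * same computation factored into a helper (it assumes, as the driver
 * does, that num_vc is a power of two no larger than 8192):
 */
#if 0   /* illustrative only */
static int ia_vcsize_sel(int num_vc)
{
        int i, sel = 0;

        for (i = 8 * 1024; i != num_vc; i /= 2)
                sel++;          /* e.g. num_vc = 1024 -> sel = 3 */
        return sel;
}
#endif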
1641   
1642 
1643 /*  
1644         The memory map suggested in appendix A and the coding for it.   
1645         Keeping it around just in case we change our mind later.  
1646   
1647                 Buffer descr    0x0000 (128 - 4K)  
1648                 UBR sched       0x1000 (1K - 4K)  
1649                 UBR Wait q      0x2000 (1K - 4K)  
1650                 Commn queues    0x3000 Packet Ready, Transmit comp(0x3100)  
1651                                         (128 - 256) each  
1652                 extended VC     0x4000 (1K - 8K)  
1653                 ABR sched       0x6000  and ABR wait queue (1K - 2K) each  
1654                 CBR sched       0x7000 (as needed)  
1655                 VC table        0x8000 (1K - 32K)  
1656 */  
1657   
1658 static void tx_intr(struct atm_dev *dev)  
1659 {  
1660         IADEV *iadev;  
1661         unsigned short status;  
1662         unsigned long flags;
1663 
1664         iadev = INPH_IA_DEV(dev);  
1665   
1666         status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);  
1667         if (status & TRANSMIT_DONE){
1668 
1669            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1670            spin_lock_irqsave(&iadev->tx_lock, flags);
1671            ia_tx_poll(iadev);
1672            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1673            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1674            if (iadev->close_pending)  
1675                wake_up(&iadev->close_wait);
1676         }         
1677         if (status & TCQ_NOT_EMPTY)  
1678         {  
1679             IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)  
1680         }  
1681 }  
1682   
1683 static void tx_dle_intr(struct atm_dev *dev)
1684 {
1685         IADEV *iadev;
1686         struct dle *dle, *cur_dle; 
1687         struct sk_buff *skb;
1688         struct atm_vcc *vcc;
1689         struct ia_vcc  *iavcc;
1690         u_int dle_lp;
1691         unsigned long flags;
1692 
1693         iadev = INPH_IA_DEV(dev);
1694         spin_lock_irqsave(&iadev->tx_lock, flags);   
1695         dle = iadev->tx_dle_q.read;
1696         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & 
1697                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1698         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
1699         while (dle != cur_dle)
1700         {
1701             /* free the DMAed skb */ 
1702             skb = skb_dequeue(&iadev->tx_dma_q); 
1703             if (!skb) break;
1704 
1705             /* ia_pkt_tx() uses 2 DLEs per packet (skb + CPCS trailer) */
1706             if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1707                 dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
1708                                  DMA_TO_DEVICE);
1709             }
1710             vcc = ATM_SKB(skb)->vcc;
1711             if (!vcc) {
1712                   printk("tx_dle_intr: vcc is null\n");
1713                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1714                   dev_kfree_skb_any(skb);
1715 
1716                   return;
1717             }
1718             iavcc = INPH_IA_VCC(vcc);
1719             if (!iavcc) {
1720                   printk("tx_dle_intr: iavcc is null\n");
1721                   spin_unlock_irqrestore(&iadev->tx_lock, flags);
1722                   dev_kfree_skb_any(skb);
1723                   return;
1724             }
1725             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1726                if ((vcc->pop) && (skb->len != 0))
1727                {     
1728                  vcc->pop(vcc, skb);
1729                } 
1730                else {
1731                  dev_kfree_skb_any(skb);
1732                }
1733             }
1734             else { /* Hold the rate-limited skb for flow control */
1735                IA_SKB_STATE(skb) |= IA_DLED;
1736                skb_queue_tail(&iavcc->txing_skb, skb);
1737             }
1738             IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
1739             if (++dle == iadev->tx_dle_q.end)
1740                  dle = iadev->tx_dle_q.start;
1741         }
1742         iadev->tx_dle_q.read = dle;
1743         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1744 }
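/*
 * [Editorial sketch; not part of the driver build.]  tx_dle_intr()
 * above locates the hardware's current position in the DLE ring from
 * the low bits of the TX list-address register: the ring occupies
 * sizeof(struct dle) * DLE_ENTRIES bytes (a power of two), so masking
 * with (ring size - 1) gives the byte offset of the active DLE, and
 * the ">> 4" converts that to an entry index (the driver assumes
 * 16-byte DLEs).  Restated on its own:
 */
#if 0   /* illustrative only */
static unsigned int dle_hw_index(u32 list_addr_reg)
{
        u32 byte_off = list_addr_reg & (sizeof(struct dle) * DLE_ENTRIES - 1);

        return byte_off / sizeof(struct dle);   /* same as byte_off >> 4 */
}
#endif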
1745   
1746 static int open_tx(struct atm_vcc *vcc)  
1747 {  
1748         struct ia_vcc *ia_vcc;  
1749         IADEV *iadev;  
1750         struct main_vc *vc;  
1751         struct ext_vc *evc;  
1752         int ret;
1753         IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)  
1754         if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;  
1755         iadev = INPH_IA_DEV(vcc->dev);  
1756         
1757         if (iadev->phy_type & FE_25MBIT_PHY) {
1758            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1759                printk("IA: ABR not supported\n");
1760                return -EINVAL; 
1761            }
1762           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1763                printk("IA: CBR not supported\n");
1764                return -EINVAL; 
1765           }
1766         }
1767         ia_vcc =  INPH_IA_VCC(vcc);
1768         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1769         if (vcc->qos.txtp.max_sdu > 
1770                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1771            printk("IA: SDU size %d exceeds the configured buffer size %d\n",
1772                   vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
1773            vcc->dev_data = NULL;
1774            kfree(ia_vcc);
1775            return -EINVAL; 
1776         }
1777         ia_vcc->vc_desc_cnt = 0;
1778         ia_vcc->txing = 1;
1779 
1780         /* find pcr */
1781         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) 
1782            vcc->qos.txtp.pcr = iadev->LineRate;
1783         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1784            vcc->qos.txtp.pcr = iadev->LineRate;
1785         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0)) 
1786            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1787         if (vcc->qos.txtp.pcr > iadev->LineRate)
1788              vcc->qos.txtp.pcr = iadev->LineRate;
1789         ia_vcc->pcr = vcc->qos.txtp.pcr;
1790 
1791         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1792         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1793         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1794         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
1795         if (ia_vcc->pcr < iadev->rate_limit)
1796            skb_queue_head_init (&ia_vcc->txing_skb);
1797         if (ia_vcc->pcr < iadev->rate_limit) {
1798            struct sock *sk = sk_atm(vcc);
1799 
1800            if (vcc->qos.txtp.max_sdu != 0) {
1801                if (ia_vcc->pcr > 60000)
1802                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1803                else if (ia_vcc->pcr > 2000)
1804                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1805                else
1806                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1807            }
1808            else
1809              sk->sk_sndbuf = 24576;
1810         }
1811            
1812         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
1813         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
1814         vc += vcc->vci;  
1815         evc += vcc->vci;  
1816         memset((caddr_t)vc, 0, sizeof(*vc));  
1817         memset((caddr_t)evc, 0, sizeof(*evc));  
1818           
1819         /* store the most significant 4 bits of vci as the last 4 bits   
1820                 of first part of atm header.  
1821            store the last 12 bits of vci as first 12 bits of the second  
1822                 part of the atm header.  
1823         */  
1824         evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;  
1825         evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;  
1826  
1827         /* check the following for different traffic classes */  
1828         if (vcc->qos.txtp.traffic_class == ATM_UBR)  
1829         {  
1830                 vc->type = UBR;  
1831                 vc->status = CRC_APPEND;
1832                 vc->acr = cellrate_to_float(iadev->LineRate);  
1833                 if (vcc->qos.txtp.pcr > 0) 
1834                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);  
1835                 IF_UBR(printk("UBR: txtp.max_pcr = 0x%x f_rate = 0x%x\n", 
1836                                              vcc->qos.txtp.max_pcr,vc->acr);)
1837         }  
1838         else if (vcc->qos.txtp.traffic_class == ATM_ABR)  
1839         {       srv_cls_param_t srv_p;
1840                 IF_ABR(printk("Tx ABR VCC\n");)  
1841                 init_abr_vc(iadev, &srv_p);
1842                 if (vcc->qos.txtp.pcr > 0) 
1843                    srv_p.pcr = vcc->qos.txtp.pcr;
1844                 if (vcc->qos.txtp.min_pcr > 0) {
1845                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1846                    if (tmpsum > iadev->LineRate)
1847                        return -EBUSY;
1848                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1849                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1850                 } 
1851                 else srv_p.mcr = 0;
1852                 if (vcc->qos.txtp.icr)
1853                    srv_p.icr = vcc->qos.txtp.icr;
1854                 if (vcc->qos.txtp.tbe)
1855                    srv_p.tbe = vcc->qos.txtp.tbe;
1856                 if (vcc->qos.txtp.frtt)
1857                    srv_p.frtt = vcc->qos.txtp.frtt;
1858                 if (vcc->qos.txtp.rif)
1859                    srv_p.rif = vcc->qos.txtp.rif;
1860                 if (vcc->qos.txtp.rdf)
1861                    srv_p.rdf = vcc->qos.txtp.rdf;
1862                 if (vcc->qos.txtp.nrm_pres)
1863                    srv_p.nrm = vcc->qos.txtp.nrm;
1864                 if (vcc->qos.txtp.trm_pres)
1865                    srv_p.trm = vcc->qos.txtp.trm;
1866                 if (vcc->qos.txtp.adtf_pres)
1867                    srv_p.adtf = vcc->qos.txtp.adtf;
1868                 if (vcc->qos.txtp.cdf_pres)
1869                    srv_p.cdf = vcc->qos.txtp.cdf;    
1870                 if (srv_p.icr > srv_p.pcr)
1871                    srv_p.icr = srv_p.pcr;    
1872                 IF_ABR(printk("ABR: srv_p.pcr = %d  mcr = %d\n", 
1873                                                       srv_p.pcr, srv_p.mcr);)
1874                 ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1875         } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1876                 if (iadev->phy_type & FE_25MBIT_PHY) {
1877                     printk("IA: CBR not supported\n");
1878                     return -EINVAL; 
1879                 }
1880                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1881                    IF_CBR(printk("requested PCR exceeds the line rate\n");)
1882                    return -1;
1883                 }
1884                 vc->type = CBR;
1885                 vc->status = CRC_APPEND;
1886                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {     
1887                     return ret;
1888                 }
1889         } else {
1890                 printk("iadev: traffic class other than UBR, ABR, or CBR is not supported\n");
1891         }
1892         
1893         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1894         IF_EVENT(printk("ia open_tx returning \n");)  
1895         return 0;  
1896 }  
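/*
 * [Editorial sketch; not part of the driver build.]  open_tx() above
 * splits the 16-bit VCI across the two cell-header halves kept in the
 * extended VC table: VCI[15:12] becomes the low nibble of atm_hdr1 and
 * VCI[11:0] fills the upper 12 bits of atm_hdr2 (the low nibble of
 * atm_hdr2 presumably remains for PTI/CLP).  For vci = 0x1234 this
 * gives hdr1 = 0x0001 and hdr2 = 0x2340, and the VCI round-trips as
 * ((hdr1 & 0xf) << 12) | (hdr2 >> 4).  A hypothetical helper pair:
 */
#if 0   /* illustrative only */
static void ia_pack_vci(u16 vci, u16 *hdr1, u16 *hdr2)
{
        *hdr1 = (vci >> 12) & 0x000f;   /* VCI[15:12] -> low nibble of hdr1 */
        *hdr2 = (vci & 0x0fff) << 4;    /* VCI[11:0]  -> hdr2 bits 15..4    */
}

static u16 ia_unpack_vci(u16 hdr1, u16 hdr2)
{
        return ((hdr1 & 0x000f) << 12) | (hdr2 >> 4);
}
#endif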
1897   
1898   
1899 static int tx_init(struct atm_dev *dev)  
1900 {  
1901         IADEV *iadev;  
1902         struct tx_buf_desc *buf_desc_ptr;
1903         unsigned int tx_pkt_start;  
1904         void *dle_addr;  
1905         int i;  
1906         u_short tcq_st_adr;  
1907         u_short *tcq_start;  
1908         u_short prq_st_adr;  
1909         u_short *prq_start;  
1910         struct main_vc *vc;  
1911         struct ext_vc *evc;   
1912         u_short tmp16;
1913         u32 vcsize_sel;
1914  
1915         iadev = INPH_IA_DEV(dev);  
1916         spin_lock_init(&iadev->tx_lock);
1917  
1918         IF_INIT(printk("Tx MASK REG: 0x%0x\n", 
1919                                 readw(iadev->seg_reg+SEG_MASK_REG));)  
1920 
1921         /* Allocate 4k (boundary aligned) bytes */
1922         dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1923                                       &iadev->tx_dle_dma, GFP_KERNEL);
1924         if (!dle_addr)  {
1925                 printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1926                 goto err_out;
1927         }
1928         iadev->tx_dle_q.start = (struct dle*)dle_addr;  
1929         iadev->tx_dle_q.read = iadev->tx_dle_q.start;  
1930         iadev->tx_dle_q.write = iadev->tx_dle_q.start;  
1931         iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1932 
1933         /* write the upper 20 bits of the start address to tx list address register */  
1934         writel(iadev->tx_dle_dma & 0xfffff000,
1935                iadev->dma + IPHASE5575_TX_LIST_ADDR);  
1936         writew(0xffff, iadev->seg_reg+SEG_MASK_REG);  
1937         writew(0, iadev->seg_reg+MODE_REG_0);  
1938         writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);  
1939         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1940         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1941         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1942   
1943         /*  
1944            Transmit side control memory map  
1945            --------------------------------    
1946          Buffer descr   0x0000 (128 - 4K)  
1947          Commn queues   0x1000  Transmit comp, Packet ready(0x1400)   
1948                                         (512 - 1K) each  
1949                                         TCQ - 4K, PRQ - 5K  
1950          CBR Table      0x1800 (as needed) - 6K  
1951          UBR Table      0x3000 (1K - 4K) - 12K  
1952          UBR Wait queue 0x4000 (1K - 4K) - 16K  
1953          ABR sched      0x5000  and ABR wait queue (1K - 2K) each  
1954                                 ABR Tbl - 20K, ABR Wq - 22K   
1955          extended VC    0x6000 (1K - 8K) - 24K  
1956          VC Table       0x8000 (1K - 32K) - 32K  
1957           
1958         Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl  
1959         and Wait q, which can be allotted later.  
1960         */  
1961      
1962         /* Buffer Descriptor Table Base address */  
1963         writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);  
1964   
1965         /* initialize each entry in the buffer descriptor table */  
1966         buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);  
1967         memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1968         buf_desc_ptr++;  
1969         tx_pkt_start = TX_PACKET_RAM;  
1970         for(i=1; i<=iadev->num_tx_desc; i++)  
1971         {  
1972                 memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));  
1973                 buf_desc_ptr->desc_mode = AAL5;  
1974                 buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;  
1975                 buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;  
1976                 buf_desc_ptr++;           
1977                 tx_pkt_start += iadev->tx_buf_sz;  
1978         }  
1979         iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
1980                                       sizeof(*iadev->tx_buf),
1981                                       GFP_KERNEL);
1982         if (!iadev->tx_buf) {
1983             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1984             goto err_free_dle;
1985         }
1986         for (i= 0; i< iadev->num_tx_desc; i++)
1987         {
1988             struct cpcs_trailer *cpcs;
1989  
1990             cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1991             if(!cpcs) {                
1992                 printk(KERN_ERR DEV_LABEL " couldn't allocate CPCS trailer buffer\n"); 
1993                 goto err_free_tx_bufs;
1994             }
1995             iadev->tx_buf[i].cpcs = cpcs;
1996             iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
1997                                                        cpcs,
1998                                                        sizeof(*cpcs),
1999                                                        DMA_TO_DEVICE);
2000         }
2001         iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
2002                                         sizeof(*iadev->desc_tbl),
2003                                         GFP_KERNEL);
2004         if (!iadev->desc_tbl) {
2005                 printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
2006                 goto err_free_all_tx_bufs;
2007         }
2008   
2009         /* Communication Queues base address */  
2010         i = TX_COMP_Q * iadev->memSize;
2011         writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);  
2012   
2013         /* Transmit Complete Queue */  
2014         writew(i, iadev->seg_reg+TCQ_ST_ADR);  
2015         writew(i, iadev->seg_reg+TCQ_RD_PTR);  
2016         writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); 
2017         iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
2018         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2019                                               iadev->seg_reg+TCQ_ED_ADR); 
2020         /* Fill the TCQ with all the free descriptors. */  
2021         tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);  
2022         tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);  
2023         for(i=1; i<=iadev->num_tx_desc; i++)  
2024         {  
2025                 *tcq_start = (u_short)i;  
2026                 tcq_start++;  
2027         }  
2028   
2029         /* Packet Ready Queue */  
2030         i = PKT_RDY_Q * iadev->memSize; 
2031         writew(i, iadev->seg_reg+PRQ_ST_ADR);  
2032         writew(i+2 * iadev->num_tx_desc * sizeof(u_short), 
2033                                               iadev->seg_reg+PRQ_ED_ADR);
2034         writew(i, iadev->seg_reg+PRQ_RD_PTR);  
2035         writew(i, iadev->seg_reg+PRQ_WR_PTR);  
2036          
2037         /* Load local copy of PRQ and TCQ ptrs */
2038         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2039         iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2040         iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2041 
2042         iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2043         iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2044         iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2045 
2046         /* Just for safety initializing the queue to have desc 1 always */  
2047         /* Fill the PRQ with all the free descriptors. */  
2048         prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);  
2049         prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);  
2050         for(i=1; i<=iadev->num_tx_desc; i++)  
2051         {  
2052                 *prq_start = (u_short)0;        /* desc 1 in all entries */  
2053                 prq_start++;  
2054         }  
2055         /* CBR Table */  
2056         IF_INIT(printk("Start CBR Init\n");)
2057 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2058         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2059 #else /* Charlie's logic is wrong ? */
2060         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2061         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2062         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2063 #endif
2064 
2065         IF_INIT(printk("value in register = 0x%x\n",
2066                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2067         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2068         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2069         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2070                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2071         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
2072         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2073         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2074         IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
2075                iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2076         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2077           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2078           readw(iadev->seg_reg+CBR_TAB_END+1));)
2079 
2080         /* Initialize the CBR Scheduling Table */
2081         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 
2082                                                           0, iadev->num_vc*6); 
2083         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2084         iadev->CbrEntryPt = 0;
2085         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2086         iadev->NumEnabledCBR = 0;
2087 
2088         /* UBR scheduling Table and wait queue */  
2089         /* initialize all bytes of UBR scheduler table and wait queue to 0   
2090                 - SCHEDSZ is 1K (# of entries).  
2091                 - UBR Table size is 4K  
2092                 - UBR wait queue is 4K  
2093            since the table and wait queues are contiguous, all the bytes   
2094            can be initialized by one memset.
2095         */  
2096         
2097         vcsize_sel = 0;
2098         i = 8*1024;
2099         while (i != iadev->num_vc) {
2100           i /= 2;
2101           vcsize_sel++;
2102         }
2103  
2104         i = MAIN_VC_TABLE * iadev->memSize;
2105         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2106         i =  EXT_VC_TABLE * iadev->memSize;
2107         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2108         i = UBR_SCHED_TABLE * iadev->memSize;
2109         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2110         i = UBR_WAIT_Q * iadev->memSize; 
2111         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2112         memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2113                                                        0, iadev->num_vc*8);
2114         /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/  
2115         /* initialize all bytes of ABR scheduler table and wait queue to 0   
2116                 - SCHEDSZ is 1K (# of entries).  
2117                 - ABR Table size is 2K  
2118                 - ABR wait queue is 2K  
2119            since the table and wait queues are contiguous, all the bytes   
2120            can be initialized by one memset.
2121         */  
2122         i = ABR_SCHED_TABLE * iadev->memSize;
2123         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2124         i = ABR_WAIT_Q * iadev->memSize;
2125         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2126  
2127         i = ABR_SCHED_TABLE*iadev->memSize;
2128         memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
2129         vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;  
2130         evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;  
2131         iadev->testTable = kmalloc_array(iadev->num_vc,
2132                                          sizeof(*iadev->testTable),
2133                                          GFP_KERNEL);
2134         if (!iadev->testTable) {
2135            printk(KERN_ERR DEV_LABEL " couldn't allocate testTable\n");
2136            goto err_free_desc_tbl;
2137         }
2138         for(i=0; i<iadev->num_vc; i++)  
2139         {  
2140                 memset((caddr_t)vc, 0, sizeof(*vc));  
2141                 memset((caddr_t)evc, 0, sizeof(*evc));  
2142                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2143                                                 GFP_KERNEL);
2144                 if (!iadev->testTable[i])
2145                         goto err_free_test_tables;
2146                 iadev->testTable[i]->lastTime = 0;
2147                 iadev->testTable[i]->fract = 0;
2148                 iadev->testTable[i]->vc_status = VC_UBR;
2149                 vc++;  
2150                 evc++;  
2151         }  
2152   
2153         /* Other Initialization */  
2154           
2155         /* Max Rate Register */  
2156         if (iadev->phy_type & FE_25MBIT_PHY) {
2157            writew(RATE25, iadev->seg_reg+MAXRATE);  
2158            writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2159         }
2160         else {
2161            writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2162            writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);  
2163         }
2164         /* Set Idle Header Registers to be sure */  
2165         writew(0, iadev->seg_reg+IDLEHEADHI);  
2166         writew(0, iadev->seg_reg+IDLEHEADLO);  
2167   
2168         /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2169         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); 
2170 
2171         iadev->close_pending = 0;
2172         init_waitqueue_head(&iadev->close_wait);
2173         init_waitqueue_head(&iadev->timeout_wait);
2174         skb_queue_head_init(&iadev->tx_dma_q);  
2175         ia_init_rtn_q(&iadev->tx_return_q);  
2176 
2177         /* RM Cell Protocol ID and Message Type */  
2178         writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);  
2179         skb_queue_head_init (&iadev->tx_backlog);
2180   
2181         /* Mode Register 1 */  
2182         writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);  
2183   
2184         /* Mode Register 0 */  
2185         writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);  
2186   
2187         /* Interrupt Status Register - read to clear */  
2188         readw(iadev->seg_reg+SEG_INTR_STATUS_REG);  
2189   
2190         /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */  
2191         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2192         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);  
2193         iadev->tx_pkt_cnt = 0;
2194         iadev->rate_limit = iadev->LineRate / 3;
2195   
2196         return 0;
2197 
2198 err_free_test_tables:
2199         while (--i >= 0)
2200                 kfree(iadev->testTable[i]);
2201         kfree(iadev->testTable);
2202 err_free_desc_tbl:
2203         kfree(iadev->desc_tbl);
2204 err_free_all_tx_bufs:
2205         i = iadev->num_tx_desc;
2206 err_free_tx_bufs:
2207         while (--i >= 0) {
2208                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2209 
2210                 dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2211                                  sizeof(*desc->cpcs), DMA_TO_DEVICE);
2212                 kfree(desc->cpcs);
2213         }
2214         kfree(iadev->tx_buf);
2215 err_free_dle:
2216         dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2217                           iadev->tx_dle_dma);
2218 err_out:
2219         return -ENOMEM;
2220 }   
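/*
 * [Editorial sketch; not part of the driver build.]  The CBR schedule
 * sized in tx_init() above allots three 16-bit slots per VC (num_vc * 6
 * bytes), so a 1K-VC board has CbrTotEntries = 3072 slots.  Granularity
 * = MAX_ATM_155 / CbrTotEntries is then the cell rate one slot is worth
 * (with MAX_ATM_155 at roughly the OC-3 cell rate of ~353,000 cells/s,
 * about 115 cells/s per slot on a 1K-VC board), and ia_close() below
 * returns NumCbrEntry * Granularity to sum_mcr when a CBR VC goes away.
 * One reasonable way to turn a requested PCR into a slot count:
 */
#if 0   /* illustrative only */
static int ia_cbr_slots_needed(int pcr_cells_per_sec, int granularity)
{
        /* round up so the VC is granted at least its requested rate */
        return (pcr_cells_per_sec + granularity - 1) / granularity;
}
#endif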
2221    
2222 static irqreturn_t ia_int(int irq, void *dev_id)  
2223 {  
2224    struct atm_dev *dev;  
2225    IADEV *iadev;  
2226    unsigned int status;  
2227    int handled = 0;
2228 
2229    dev = dev_id;  
2230    iadev = INPH_IA_DEV(dev);  
2231    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))  
2232    { 
2233         handled = 1;
2234         IF_EVENT(printk("ia_int: status = 0x%x\n", status);) 
2235         if (status & STAT_REASSINT)  
2236         {  
2237            /* reassembly interrupt */  
2238            IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) 
2239            rx_intr(dev);  
2240         }  
2241         if (status & STAT_DLERINT)  
2242         {  
2243            /* Clear this bit by writing a 1 to it. */  
2244            writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2245            rx_dle_intr(dev);  
2246         }  
2247         if (status & STAT_SEGINT)  
2248         {  
2249            /* segmentation interrupt */ 
2250            IF_EVENT(printk("IA: tx_intr \n");) 
2251            tx_intr(dev);  
2252         }  
2253         if (status & STAT_DLETINT)  
2254         {  
2255            writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
2256            tx_dle_intr(dev);  
2257         }  
2258         if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))  
2259         {  
2260            if (status & STAT_FEINT) 
2261                ia_frontend_intr(iadev);
2262         }  
2263    }
2264    return IRQ_RETVAL(handled);
2265 }  
2266           
2267           
2268           
2269 /*----------------------------- entries --------------------------------*/  
2270 static int get_esi(struct atm_dev *dev)  
2271 {  
2272         IADEV *iadev;  
2273         int i;  
2274         u32 mac1;  
2275         u16 mac2;  
2276           
2277         iadev = INPH_IA_DEV(dev);  
2278         mac1 = cpu_to_be32(le32_to_cpu(readl(  
2279                                 iadev->reg+IPHASE5575_MAC1)));  
2280         mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));  
2281         IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)  
2282         for (i=0; i<MAC1_LEN; i++)  
2283                 dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));  
2284           
2285         for (i=0; i<MAC2_LEN; i++)  
2286                 dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));  
2287         return 0;  
2288 }  
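/*
 * [Editorial sketch; not part of the driver build.]  get_esi() above
 * assembles the 6-byte ESI (the adapter's MAC address) from two
 * registers: the four bytes of MAC1 followed by the two bytes of MAC2,
 * most significant byte first.  The same unpacking without the loops:
 */
#if 0   /* illustrative only */
static void ia_esi_from_regs(u32 mac1, u16 mac2, u8 esi[6])
{
        esi[0] = mac1 >> 24;
        esi[1] = mac1 >> 16;
        esi[2] = mac1 >> 8;
        esi[3] = mac1;
        esi[4] = mac2 >> 8;
        esi[5] = mac2;
}
#endif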
2289           
2290 static int reset_sar(struct atm_dev *dev)  
2291 {  
2292         IADEV *iadev;  
2293         int i, error = 1;  
2294         unsigned int pci[64];  
2295           
2296         iadev = INPH_IA_DEV(dev);  
2297         for(i=0; i<64; i++)  
2298           if ((error = pci_read_config_dword(iadev->pci,  
2299                                 i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)  
2300               return error;  
2301         writel(0, iadev->reg+IPHASE5575_EXT_RESET);  
2302         for(i=0; i<64; i++)  
2303           if ((error = pci_write_config_dword(iadev->pci,  
2304                                         i*4, pci[i])) != PCIBIOS_SUCCESSFUL)  
2305             return error;  
2306         udelay(5);  
2307         return 0;  
2308 }  
2309           
2310           
2311 static int ia_init(struct atm_dev *dev)
2312 {  
2313         IADEV *iadev;  
2314         unsigned long real_base;
2315         void __iomem *base;
2316         unsigned short command;  
2317         int error, i; 
2318           
2319         /* The device has been identified and registered. Now we read   
2320            necessary configuration info like memory base address,   
2321            interrupt number etc */  
2322           
2323         IF_INIT(printk(">ia_init\n");)  
2324         dev->ci_range.vpi_bits = 0;  
2325         dev->ci_range.vci_bits = NR_VCI_LD;  
2326 
2327         iadev = INPH_IA_DEV(dev);  
2328         real_base = pci_resource_start (iadev->pci, 0);
2329         iadev->irq = iadev->pci->irq;
2330                   
2331         error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2332         if (error) {
2333                 printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",  
2334                                 dev->number,error);  
2335                 return -EINVAL;  
2336         }  
2337         IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",  
2338                         dev->number, iadev->pci->revision, real_base, iadev->irq);)
2339           
2340         /* find mapping size of board */  
2341           
2342         iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2343 
2344         if (iadev->pci_map_size == 0x100000){
2345           iadev->num_vc = 4096;
2346           dev->ci_range.vci_bits = NR_VCI_4K_LD;  
2347           iadev->memSize = 4;
2348         }
2349         else if (iadev->pci_map_size == 0x40000) {
2350           iadev->num_vc = 1024;
2351           iadev->memSize = 1;
2352         }
2353         else {
2354            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2355            return -EINVAL;
2356         }
2357         IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)  
2358           
2359         /* enable bus mastering */
2360         pci_set_master(iadev->pci);
2361 
2362         /*  
2363          * Delay at least 1us before doing any memory accesses (we use 10)  
2364          */  
2365         udelay(10);  
2366           
2367         /* mapping the physical address to a virtual address in address space */  
2368         base = ioremap(real_base, iadev->pci_map_size);  
2369           
2370         if (!base)  
2371         {  
2372                 printk(DEV_LABEL " (itf %d): can't set up page mapping\n",  
2373                             dev->number);  
2374                 return -ENOMEM;
2375         }  
2376         IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",  
2377                         dev->number, iadev->pci->revision, base, iadev->irq);)
2378           
2379         /* filling the iphase dev structure */  
2380         iadev->mem = iadev->pci_map_size /2;  
2381         iadev->real_base = real_base;  
2382         iadev->base = base;  
2383                   
2384         /* Bus Interface Control Registers */  
2385         iadev->reg = base + REG_BASE;
2386         /* Segmentation Control Registers */  
2387         iadev->seg_reg = base + SEG_BASE;
2388         /* Reassembly Control Registers */  
2389         iadev->reass_reg = base + REASS_BASE;  
2390         /* Front end/ DMA control registers */  
2391         iadev->phy = base + PHY_BASE;  
2392         iadev->dma = base + PHY_BASE;  
2393         /* RAM - Segmentation RAM and Reassembly RAM */  
2394         iadev->ram = base + ACTUAL_RAM_BASE;  
2395         iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;  
2396         iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;  
2397   
2398         /* let's print out the above */  
2399         IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", 
2400           iadev->reg,iadev->seg_reg,iadev->reass_reg, 
2401           iadev->phy, iadev->ram, iadev->seg_ram, 
2402           iadev->reass_ram);) 
2403           
2404         /* let's try reading the MAC address */  
2405         error = get_esi(dev);  
2406         if (error) {
2407           iounmap(iadev->base);
2408           return error;  
2409         }
2410         printk("IA: ");
2411         for (i=0; i < ESI_LEN; i++)  
2412                 printk("%s%02X",i ? "-" : "",dev->esi[i]);  
2413         printk("\n");  
2414   
2415         /* reset SAR */  
2416         if (reset_sar(dev)) {
2417            iounmap(iadev->base);
2418            printk("IA: reset SAR failed, please try again\n");
2419            return 1;
2420         }
2421         return 0;  
2422 }  
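/*
 * [Editorial sketch; not part of the driver build.]  ia_init() above
 * derives the whole board configuration from the length of PCI BAR 0:
 * a 1 MB mapping means a 4K-VC board (memSize scale factor 4, 12-bit
 * VCI space), a 256 KB mapping means a 1K-VC board (memSize 1).
 * Restated as a table:
 */
#if 0   /* illustrative only */
static int ia_cfg_from_bar_len(unsigned long bar_len,
                               unsigned int *num_vc, unsigned int *mem_sel)
{
        switch (bar_len) {
        case 0x100000:          /* 1 MB mapping: 4K-VC board   */
                *num_vc = 4096;
                *mem_sel = 4;
                return 0;
        case 0x40000:           /* 256 KB mapping: 1K-VC board */
                *num_vc = 1024;
                *mem_sel = 1;
                return 0;
        default:
                return -EINVAL; /* unknown board */
        }
}
#endif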
2423 
2424 static void ia_update_stats(IADEV *iadev) {
2425     if (!iadev->carrier_detect)
2426         return;
2427     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2428     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2429     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2430     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2431     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2432     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2433     return;
2434 }
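/*
 * [Editorial sketch; not part of the driver build.]  The SAR exposes
 * its cell counters as two 16-bit register halves; ia_update_stats()
 * above accumulates them by adding the low word and the high word
 * shifted up by 16.  The recombination in isolation:
 */
#if 0   /* illustrative only */
static u32 ia_split_counter(u16 lo, u16 hi)
{
        return (u32)lo | ((u32)hi << 16);
}
#endif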
2435   
2436 static void ia_led_timer(struct timer_list *unused) {
2437         unsigned long flags;
2438         static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2439         u_char i;
2440         static u32 ctrl_reg; 
2441         for (i = 0; i < iadev_count; i++) {
2442            if (ia_dev[i]) {
2443               ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2444               if (blinking[i] == 0) {
2445                  blinking[i]++;
2446                  ctrl_reg &= (~CTRL_LED);
2447                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2448                  ia_update_stats(ia_dev[i]);
2449               }
2450               else {
2451                  blinking[i] = 0;
2452                  ctrl_reg |= CTRL_LED;
2453                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2454                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2455                  if (ia_dev[i]->close_pending)  
2456                     wake_up(&ia_dev[i]->close_wait);
2457                  ia_tx_poll(ia_dev[i]);
2458                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2459               }
2460            }
2461         }
2462         mod_timer(&ia_timer, jiffies + HZ / 4);
2463         return;
2464 }
2465 
2466 static void ia_phy_put(struct atm_dev *dev, unsigned char value,   
2467         unsigned long addr)  
2468 {  
2469         writel(value, INPH_IA_DEV(dev)->phy+addr);  
2470 }  
2471   
2472 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)  
2473 {  
2474         return readl(INPH_IA_DEV(dev)->phy+addr);  
2475 }  
2476 
2477 static void ia_free_tx(IADEV *iadev)
2478 {
2479         int i;
2480 
2481         kfree(iadev->desc_tbl);
2482         for (i = 0; i < iadev->num_vc; i++)
2483                 kfree(iadev->testTable[i]);
2484         kfree(iadev->testTable);
2485         for (i = 0; i < iadev->num_tx_desc; i++) {
2486                 struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2487 
2488                 dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2489                                  sizeof(*desc->cpcs), DMA_TO_DEVICE);
2490                 kfree(desc->cpcs);
2491         }
2492         kfree(iadev->tx_buf);
2493         dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2494                           iadev->tx_dle_dma);
2495 }
2496 
2497 static void ia_free_rx(IADEV *iadev)
2498 {
2499         kfree(iadev->rx_open);
2500         dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2501                           iadev->rx_dle_dma);
2502 }
2503 
2504 static int ia_start(struct atm_dev *dev)
2505 {  
2506         IADEV *iadev;  
2507         int error;  
2508         unsigned char phy;  
2509         u32 ctrl_reg;  
2510         IF_EVENT(printk(">ia_start\n");)  
2511         iadev = INPH_IA_DEV(dev);  
2512         if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2513                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",  
2514                     dev->number, iadev->irq);  
2515                 error = -EAGAIN;
2516                 goto err_out;
2517         }  
2518         /* @@@ should release IRQ on error */  
2519         /* enabling memory + master */  
2520         if ((error = pci_write_config_word(iadev->pci,   
2521                                 PCI_COMMAND,   
2522                                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))   
2523         {  
2524                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"  
2525                     "master (0x%x)\n",dev->number, error);  
2526                 error = -EIO;  
2527                 goto err_free_irq;
2528         }  
2529         udelay(10);  
2530   
2531         /* Maybe we should reset the front end, initialize Bus Interface Control   
2532                 Registers and see. */  
2533   
2534         IF_INIT(printk("Bus ctrl reg: %08x\n", 
2535                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2536         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2537         ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))  
2538                         | CTRL_B8  
2539                         | CTRL_B16  
2540                         | CTRL_B32  
2541                         | CTRL_B48  
2542                         | CTRL_B64  
2543                         | CTRL_B128  
2544                         | CTRL_ERRMASK  
2545                         | CTRL_DLETMASK         /* should be removed later */  
2546                         | CTRL_DLERMASK  
2547                         | CTRL_SEGMASK  
2548                         | CTRL_REASSMASK          
2549                         | CTRL_FEMASK  
2550                         | CTRL_CSPREEMPT;  
2551   
2552        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2553   
2554         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2555                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));  
2556            printk("Bus status reg after init: %08x\n", 
2557                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)  
2558     
2559         ia_hw_type(iadev); 
2560         error = tx_init(dev);  
2561         if (error)
2562                 goto err_free_irq;
2563         error = rx_init(dev);  
2564         if (error)
2565                 goto err_free_tx;
2566   
2567         ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);  
2568         writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);   
2569         IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", 
2570                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)  
2571         phy = 0; /* resolve compiler complaint */
2572         IF_INIT ( 
2573         if ((phy=ia_phy_get(dev,0)) == 0x30)  
2574                 printk("IA: pm5346,rev.%d\n",phy&0x0f);  
2575         else  
2576                 printk("IA: utopia,rev.%0x\n",phy);) 
2577 
2578         if (iadev->phy_type &  FE_25MBIT_PHY)
2579            ia_mb25_init(iadev);
2580         else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2581            ia_suni_pm7345_init(iadev);
2582         else {
2583                 error = suni_init(dev);
2584                 if (error)
2585                         goto err_free_rx;
2586                 if (dev->phy->start) {
2587                         error = dev->phy->start(dev);
2588                         if (error)
2589                                 goto err_free_rx;
2590                 }
2591                 /* Get iadev->carrier_detect status */
2592                 ia_frontend_intr(iadev);
2593         }
2594         return 0;
2595 
2596 err_free_rx:
2597         ia_free_rx(iadev);
2598 err_free_tx:
2599         ia_free_tx(iadev);
2600 err_free_irq:
2601         free_irq(iadev->irq, dev);  
2602 err_out:
2603         return error;
2604 }  
2605   
2606 static void ia_close(struct atm_vcc *vcc)  
2607 {
2608         DEFINE_WAIT(wait);
2609         u16 *vc_table;
2610         IADEV *iadev;
2611         struct ia_vcc *ia_vcc;
2612         struct sk_buff *skb = NULL;
2613         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2614         unsigned long closetime, flags;
2615 
2616         iadev = INPH_IA_DEV(vcc->dev);
2617         ia_vcc = INPH_IA_VCC(vcc);
2618         if (!ia_vcc) return;  
2619 
2620         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n", 
2621                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2622         clear_bit(ATM_VF_READY,&vcc->flags);
2623         skb_queue_head_init (&tmp_tx_backlog);
2624         skb_queue_head_init (&tmp_vcc_backlog); 
2625         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2626            iadev->close_pending++;
2627            prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2628            schedule_timeout(msecs_to_jiffies(500));
2629            finish_wait(&iadev->timeout_wait, &wait);
2630            spin_lock_irqsave(&iadev->tx_lock, flags); 
2631            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2632               if (ATM_SKB(skb)->vcc == vcc){ 
2633                  if (vcc->pop) vcc->pop(vcc, skb);
2634                  else dev_kfree_skb_any(skb);
2635               }
2636               else 
2637                  skb_queue_tail(&tmp_tx_backlog, skb);
2638            } 
2639            while((skb = skb_dequeue(&tmp_tx_backlog))) 
2640              skb_queue_tail(&iadev->tx_backlog, skb);
2641            IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);) 
2642            closetime = 300000 / ia_vcc->pcr;
2643            if (closetime == 0)
2644               closetime = 1;
2645            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2646            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2647            spin_lock_irqsave(&iadev->tx_lock, flags);
2648            iadev->close_pending--;
2649            iadev->testTable[vcc->vci]->lastTime = 0;
2650            iadev->testTable[vcc->vci]->fract = 0; 
2651            iadev->testTable[vcc->vci]->vc_status = VC_UBR; 
2652            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2653               if (vcc->qos.txtp.min_pcr > 0)
2654                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2655            }
2656            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2657               ia_vcc = INPH_IA_VCC(vcc); 
2658               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2659               ia_cbrVc_close (vcc);
2660            }
2661            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2662         }
2663         
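             /* Receive-side teardown: invalidate this VCI's reassembly and VC
              * table entries, restore the ABR defaults, and drain any completed
              * receive DLEs before releasing the VC slot. */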
2664         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {   
2665            // reset reass table
2666            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2667            vc_table += vcc->vci; 
2668            *vc_table = NO_AAL5_PKT;
2669            // reset vc table
2670            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2671            vc_table += vcc->vci;
2672            *vc_table = (vcc->vci << 6) | 15;
2673            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2674               struct abr_vc_table __iomem *abr_vc_table = 
2675                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2676               abr_vc_table +=  vcc->vci;
2677               abr_vc_table->rdf = 0x0003;
2678               abr_vc_table->air = 0x5eb1;
2679            }                                 
2680            // Drain the packets
2681            rx_dle_intr(vcc->dev); 
2682            iadev->rx_open[vcc->vci] = NULL;
2683         }
2684         kfree(INPH_IA_VCC(vcc));  
2685         ia_vcc = NULL;
2686         vcc->dev_data = NULL;
2687         clear_bit(ATM_VF_ADDR,&vcc->flags);
2688         return;        
2689 }  
2690   
2691 static int ia_open(struct atm_vcc *vcc)
2692 {  
2693         struct ia_vcc *ia_vcc;  
2694         int error;  
2695         if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))  
2696         {  
2697                 IF_EVENT(printk("ia: not partially allocated resources\n");)  
2698                 vcc->dev_data = NULL;
2699         }  
2700         if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)  
2701         {  
2702                 IF_EVENT(printk("iphase open: unspec part\n");)  
2703                 set_bit(ATM_VF_ADDR,&vcc->flags);
2704         }  
2705         if (vcc->qos.aal != ATM_AAL5)  
2706                 return -EINVAL;  
2707         IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", 
2708                                  vcc->dev->number, vcc->vpi, vcc->vci);)  
2709   
2710         /* Device dependent initialization */  
2711         ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);  
2712         if (!ia_vcc) return -ENOMEM;  
2713         vcc->dev_data = ia_vcc;
2714   
2715         if ((error = open_rx(vcc)))  
2716         {  
2717                 IF_EVENT(printk("iadev: error in open_rx, closing\n");)  
2718                 ia_close(vcc);  
2719                 return error;  
2720         }  
2721   
2722         if ((error = open_tx(vcc)))  
2723         {  
2724                 IF_EVENT(printk("iadev: error in open_tx, closing\n");)  
2725                 ia_close(vcc);  
2726                 return error;  
2727         }  
2728   
2729         set_bit(ATM_VF_READY,&vcc->flags);
2730 
2731 #if 0
2732         {
2733            static u8 first = 1; 
2734            if (first) {
2735               ia_timer.expires = jiffies + 3*HZ;
2736               add_timer(&ia_timer);
2737               first = 0;
2738            }           
2739         }
2740 #endif
2741         IF_EVENT(printk("ia open returning\n");)  
2742         return 0;  
2743 }  
2744   
2745 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)  
2746 {  
2747         IF_EVENT(printk(">ia_change_qos\n");)  
2748         return 0;  
2749 }  
2750   
2751 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)  
2752 {  
2753    IA_CMDBUF ia_cmds;
2754    IADEV *iadev;
2755    int i, board;
2756    u16 __user *tmps;
2757    IF_EVENT(printk(">ia_ioctl\n");)  
2758    if (cmd != IA_CMD) {
2759       if (!dev->phy->ioctl) return -EINVAL;
2760       return dev->phy->ioctl(dev,cmd,arg);
2761    }
2762    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; 
2763    board = ia_cmds.status;
2764 
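        /* Clamp the user-supplied board index and sanitize it against
           speculative out-of-bounds indexing (array_index_nospec). */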
2765    if ((board < 0) || (board > iadev_count))
2766       board = 0;
2767    board = array_index_nospec(board, iadev_count + 1);
2768 
2769    iadev = ia_dev[board];
2770    switch (ia_cmds.cmd) {
2771    case MEMDUMP:
2772    {
2773         switch (ia_cmds.sub_cmd) {
2774           case MEMDUMP_SEGREG:
2775              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2776              tmps = (u16 __user *)ia_cmds.buf;
2777              for(i=0; i<0x80; i+=2, tmps++)
2778                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2779              ia_cmds.status = 0;
2780              ia_cmds.len = 0x80;
2781              break;
2782           case MEMDUMP_REASSREG:
2783              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2784              tmps = (u16 __user *)ia_cmds.buf;
2785              for(i=0; i<0x80; i+=2, tmps++)
2786                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2787              ia_cmds.status = 0;
2788              ia_cmds.len = 0x80;
2789              break;
2790           case MEMDUMP_FFL:
2791           {  
2792              ia_regs_t       *regs_local;
2793              ffredn_t        *ffL;
2794              rfredn_t        *rfL;
2795                      
2796              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2797              regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2798              if (!regs_local) return -ENOMEM;
2799              ffL = &regs_local->ffredn;
2800              rfL = &regs_local->rfredn;
2801              /* Copy real rfred registers into the local copy */
2802              for (i=0; i<(sizeof (rfredn_t))/4; i++)
2803                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2804              /* Copy real ffred registers into the local copy */
2805              for (i=0; i<(sizeof (ffredn_t))/4; i++)
2806                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2807 
2808              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2809                 kfree(regs_local);
2810                 return -EFAULT;
2811              }
2812              kfree(regs_local);
2813              printk("Board %d registers dumped\n", board);
2814              ia_cmds.status = 0;                  
2815          }      
2816              break;        
2817          case READ_REG:
2818          {  
2819              if (!capable(CAP_NET_ADMIN)) return -EPERM;
2820              desc_dbg(iadev); 
2821              ia_cmds.status = 0; 
2822          }
2823              break;
2824          case 0x6:
2825          {  
2826              ia_cmds.status = 0; 
2827              printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
2828              printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
2829          }
2830              break;
2831          case 0x8:
2832          {
2833              struct k_sonet_stats *stats;
2834              stats = &PRIV(_ia_dev[board])->sonet_stats;
2835              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2836              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2837              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2838              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2839              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2840              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2841              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2842              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2843              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2844          }
2845             ia_cmds.status = 0;
2846             break;
2847          case 0x9:
2848             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2849             for (i = 1; i <= iadev->num_rx_desc; i++)
2850                free_desc(_ia_dev[board], i);
2851             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), 
2852                                             iadev->reass_reg+REASS_MASK_REG);
2853             iadev->rxing = 1;
2854             
2855             ia_cmds.status = 0;
2856             break;
2857 
2858          case 0xb:
2859             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2860             ia_frontend_intr(iadev);
2861             break;
2862          case 0xa:
2863             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2864          {  
2865              ia_cmds.status = 0; 
2866              IADebugFlag = ia_cmds.maddr;
2867              printk("New debug option loaded\n");
2868          }
2869              break;
2870          default:
2871              ia_cmds.status = 0;
2872              break;
2873       } 
2874    }
2875       break;
2876    default:
2877       break;
2878 
2879    }    
2880    return 0;  
2881 }  
2882   
2883 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
2884         void __user *optval, int optlen)  
2885 {  
2886         IF_EVENT(printk(">ia_getsockopt\n");)  
2887         return -EINVAL;  
2888 }  
2889   
2890 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
2891         void __user *optval, unsigned int optlen)  
2892 {  
2893         IF_EVENT(printk(">ia_setsockopt\n");)  
2894         return -EINVAL;  
2895 }  
2896   
2897 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2898         IADEV *iadev;
2899         struct dle *wr_ptr;
2900         struct tx_buf_desc __iomem *buf_desc_ptr;
2901         int desc;
2902         int comp_code;
2903         int total_len;
2904         struct cpcs_trailer *trailer;
2905         struct ia_vcc *iavcc;
2906 
2907         iadev = INPH_IA_DEV(vcc->dev);  
2908         iavcc = INPH_IA_VCC(vcc);
2909         if (!iavcc->txing) {
2910            printk("discard packet on closed VC\n");
2911            if (vcc->pop)
2912                 vcc->pop(vcc, skb);
2913            else
2914                 dev_kfree_skb_any(skb);
2915            return 0;
2916         }
2917 
2918         if (skb->len > iadev->tx_buf_sz - 8) {
2919            printk("Transmit size over tx buffer size\n");
2920            if (vcc->pop)
2921                  vcc->pop(vcc, skb);
2922            else
2923                  dev_kfree_skb_any(skb);
2924           return 0;
2925         }
2926         if ((unsigned long)skb->data & 3) {
2927            printk("Misaligned SKB\n");
2928            if (vcc->pop)
2929                  vcc->pop(vcc, skb);
2930            else
2931                  dev_kfree_skb_any(skb);
2932            return 0;
2933         }       
2934         /* Get a descriptor number from our free descriptor queue.
2935            The descriptor number comes from the TCQ, which is used here as
2936            a free-buffer queue; the TCQ is initialized with all the
2937            descriptors and is therefore initially full.
2938         */
2939         desc = get_desc (iadev, iavcc);
2940         if (desc == 0xffff) 
2941             return 1;
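             /* get_desc() packs a 3-bit completion code in the upper bits and
              * the 13-bit descriptor number in the lower bits. */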
2942         comp_code = desc >> 13;  
2943         desc &= 0x1fff;  
2944   
2945         if ((desc == 0) || (desc > iadev->num_tx_desc))  
2946         {  
2947                 IF_ERR(printk(DEV_LABEL " invalid desc for send: %d\n", desc);) 
2948                 atomic_inc(&vcc->stats->tx);
2949                 if (vcc->pop)   
2950                     vcc->pop(vcc, skb);   
2951                 else  
2952                     dev_kfree_skb_any(skb);
2953                 return 0;   /* return SUCCESS */
2954         }  
2955   
2956         if (comp_code)  
2957         {  
2958             IF_ERR(printk(DEV_LABEL " send desc:%d completion code %d error\n", 
2959                                                             desc, comp_code);)  
2960         }  
2961        
2962         /* remember the desc and vcc mapping */
2963         iavcc->vc_desc_cnt++;
2964         iadev->desc_tbl[desc-1].iavcc = iavcc;
2965         iadev->desc_tbl[desc-1].txskb = skb;
2966         IA_SKB_STATE(skb) = 0;
2967 
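             /* Consume the TCQ entry just read: advance the read pointer,
              * wrapping at the end of the queue, and write it back to the SAR. */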
2968         iadev->ffL.tcq_rd += 2;
2969         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2970                 iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2971         writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2972   
2973         /* Put the descriptor number in the packet ready queue  
2974                 and put the updated write pointer in the DLE field   
2975         */   
2976         *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; 
2977 
2978         iadev->ffL.prq_wr += 2;
2979         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2980                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2981           
2982         /* Figure out the exact length of the packet plus the padding
2983            required to align it on a 48-byte boundary. */
2984         total_len = skb->len + sizeof(struct cpcs_trailer);  
2985         total_len = ((total_len + 47) / 48) * 48;
2986         IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)  
2987  
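             /* The padded length covers the payload plus the CPCS trailer,
              * rounded up to a whole number of 48-byte cell payloads (AAL5). */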
2988         /* Put the packet in a tx buffer */   
2989         trailer = iadev->tx_buf[desc-1].cpcs;
2990         IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
2991                   skb, skb->data, skb->len, desc);)
2992         trailer->control = 0; 
2993         /*big endian*/ 
2994         trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2995         trailer->crc32 = 0;     /* not needed - dummy bytes */  
2996 
2997         /* Display the packet */  
2998         IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", 
2999                                                         skb->len, tcnter++);  
3000         xdump(skb->data, skb->len, "TX: ");
3001         printk("\n");)
3002 
3003         /* Build the buffer descriptor */  
3004         buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
3005         buf_desc_ptr += desc;   /* points to the corresponding entry */  
3006         buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;   
3007         /* Huh ? p.115 of users guide describes this as a read-only register */
3008         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3009         buf_desc_ptr->vc_index = vcc->vci;
3010         buf_desc_ptr->bytes = total_len;  
3011 
3012         if (vcc->qos.txtp.traffic_class == ATM_ABR)  
3013            clear_lockup (vcc, iadev);
3014 
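             /* Two DLEs are queued per packet: one that DMA-maps the payload
              * (mode TX_DLE_PSI) and one for the CPCS trailer, which carries
              * the completion-interrupt enable and the updated PRQ write
              * pointer. */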
3015         /* Build the DLE structure */  
3016         wr_ptr = iadev->tx_dle_q.write;  
3017         memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));  
3018         wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
3019                                               skb->len, DMA_TO_DEVICE);
3020         wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | 
3021                                                   buf_desc_ptr->buf_start_lo;  
3022         /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
3023         wr_ptr->bytes = skb->len;  
3024 
3025         /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3026         if ((wr_ptr->bytes >> 2) == 0xb)
3027            wr_ptr->bytes = 0x30;
3028 
3029         wr_ptr->mode = TX_DLE_PSI; 
3030         wr_ptr->prq_wr_ptr_data = 0;
3031   
3032         /* end is not to be used for the DLE q */  
3033         if (++wr_ptr == iadev->tx_dle_q.end)  
3034                 wr_ptr = iadev->tx_dle_q.start;  
3035         
3036         /* Build trailer dle */
3037         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3038         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | 
3039           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3040 
3041         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3042         wr_ptr->mode = DMA_INT_ENABLE; 
3043         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3044         
3045         /* end is not to be used for the DLE q */
3046         if (++wr_ptr == iadev->tx_dle_q.end)  
3047                 wr_ptr = iadev->tx_dle_q.start;
3048 
3049         iadev->tx_dle_q.write = wr_ptr;  
3050         ATM_DESC(skb) = vcc->vci;
3051         skb_queue_tail(&iadev->tx_dma_q, skb);
3052 
3053         atomic_inc(&vcc->stats->tx);
3054         iadev->tx_pkt_cnt++;
3055         /* Increment transaction counter */  
3056         writel(2, iadev->dma+IPHASE5575_TX_COUNTER);  
3057         
3058 #if 0        
3059         /* add flow control logic */ 
3060         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3061           if (iavcc->vc_desc_cnt > 10) {
3062              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3063             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3064               iavcc->flow_inc = -1;
3065               iavcc->saved_tx_quota = vcc->tx_quota;
3066            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3067              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3068              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); 
3069               iavcc->flow_inc = 0;
3070            }
3071         }
3072 #endif
3073         IF_TX(printk("ia send done\n");)  
3074         return 0;  
3075 }  
3076 
3077 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3078 {
3079         IADEV *iadev; 
3080         unsigned long flags;
3081 
3082         iadev = INPH_IA_DEV(vcc->dev);
3083         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3084         {
3085             if (!skb)
3086                 printk(KERN_CRIT "null skb in ia_send\n");
3087             else dev_kfree_skb_any(skb);
3088             return -EINVAL;
3089         }                         
3090         spin_lock_irqsave(&iadev->tx_lock, flags); 
3091         if (!test_bit(ATM_VF_READY,&vcc->flags)){ 
3092             dev_kfree_skb_any(skb);
3093             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3094             return -EINVAL; 
3095         }
3096         ATM_SKB(skb)->vcc = vcc;
3097  
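             /* Preserve ordering: if the backlog already holds packets, queue
              * behind them; otherwise try to transmit directly and fall back
              * to the backlog if no descriptor is available. */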
3098         if (skb_peek(&iadev->tx_backlog)) {
3099            skb_queue_tail(&iadev->tx_backlog, skb);
3100         }
3101         else {
3102            if (ia_pkt_tx (vcc, skb)) {
3103               skb_queue_tail(&iadev->tx_backlog, skb);
3104            }
3105         }
3106         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3107         return 0;
3108 
3109 }
3110 
3111 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3112 { 
3113   int   left = *pos, n;   
3114   char  *tmpPtr;
3115   IADEV *iadev = INPH_IA_DEV(dev);
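       /* The ATM proc layer calls this once per position: position 0 emits the
          board description, position 1 the packet/cell counters. */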
3116   if(!left--) {
3117      if (iadev->phy_type == FE_25MBIT_PHY) {
3118        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3119        return n;
3120      }
3121      if (iadev->phy_type == FE_DS3_PHY)
3122         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3123      else if (iadev->phy_type == FE_E3_PHY)
3124         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3125      else if (iadev->phy_type == FE_UTP_OPTION)
3126          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155"); 
3127      else
3128         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3129      tmpPtr = page + n;
3130      if (iadev->pci_map_size == 0x40000)
3131         n += sprintf(tmpPtr, "-1KVC-");
3132      else
3133         n += sprintf(tmpPtr, "-4KVC-");  
3134      tmpPtr = page + n; 
3135      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3136         n += sprintf(tmpPtr, "1M  \n");
3137      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3138         n += sprintf(tmpPtr, "512K\n");
3139      else
3140        n += sprintf(tmpPtr, "128K\n");
3141      return n;
3142   }
3143   if (!left) {
3144      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3145                            "  Size of Tx Buffer  :  %u\n"
3146                            "  Number of Rx Buffer:  %u\n"
3147                            "  Size of Rx Buffer  :  %u\n"
3148                            "  Packets Received   :  %u\n"
3149                            "  Packets Transmitted:  %u\n"
3150                            "  Cells Received     :  %u\n"
3151                            "  Cells Transmitted  :  %u\n"
3152                            "  Board Dropped Cells:  %u\n"
3153                            "  Board Dropped Pkts :  %u\n",
3154                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3155                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3156                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3157                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3158                            iadev->drop_rxcell, iadev->drop_rxpkt);                        
3159   }
3160   return 0;
3161 }
3162   
3163 static const struct atmdev_ops ops = {  
3164         .open           = ia_open,  
3165         .close          = ia_close,  
3166         .ioctl          = ia_ioctl,  
3167         .getsockopt     = ia_getsockopt,  
3168         .setsockopt     = ia_setsockopt,  
3169         .send           = ia_send,  
3170         .phy_put        = ia_phy_put,  
3171         .phy_get        = ia_phy_get,  
3172         .change_qos     = ia_change_qos,  
3173         .proc_read      = ia_proc_read,
3174         .owner          = THIS_MODULE,
3175 };  
3176           
3177 static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3178 {  
3179         struct atm_dev *dev;  
3180         IADEV *iadev;  
3181         int ret;
3182 
3183         iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3184         if (!iadev) {
3185                 ret = -ENOMEM;
3186                 goto err_out;
3187         }
3188 
3189         iadev->pci = pdev;
3190 
3191         IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3192                 pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3193         if (pci_enable_device(pdev)) {
3194                 ret = -ENODEV;
3195                 goto err_out_free_iadev;
3196         }
3197         dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
3198         if (!dev) {
3199                 ret = -ENOMEM;
3200                 goto err_out_disable_dev;
3201         }
3202         dev->dev_data = iadev;
3203         IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3204         IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
3205                 iadev->LineRate);)
3206 
3207         pci_set_drvdata(pdev, dev);
3208 
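             /* Record the adapter in the module-global tables; the ioctl path
              * looks boards up by this index. */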
3209         ia_dev[iadev_count] = iadev;
3210         _ia_dev[iadev_count] = dev;
3211         iadev_count++;
3212         if (ia_init(dev) || ia_start(dev)) {  
3213                 IF_INIT(printk("IA register failed!\n");)
3214                 iadev_count--;
3215                 ia_dev[iadev_count] = NULL;
3216                 _ia_dev[iadev_count] = NULL;
3217                 ret = -EINVAL;
3218                 goto err_out_deregister_dev;
3219         }
3220         IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3221 
3222         iadev->next_board = ia_boards;  
3223         ia_boards = dev;  
3224 
3225         return 0;
3226 
3227 err_out_deregister_dev:
3228         atm_dev_deregister(dev);  
3229 err_out_disable_dev:
3230         pci_disable_device(pdev);
3231 err_out_free_iadev:
3232         kfree(iadev);
3233 err_out:
3234         return ret;
3235 }
3236 
3237 static void ia_remove_one(struct pci_dev *pdev)
3238 {
3239         struct atm_dev *dev = pci_get_drvdata(pdev);
3240         IADEV *iadev = INPH_IA_DEV(dev);
3241 
3242         /* Disable phy interrupts */
3243         ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
3244                                    SUNI_RSOP_CIE);
3245         udelay(1);
3246 
3247         if (dev->phy && dev->phy->stop)
3248                 dev->phy->stop(dev);
3249 
3250         /* De-register device */  
3251         free_irq(iadev->irq, dev);
3252         iadev_count--;
3253         ia_dev[iadev_count] = NULL;
3254         _ia_dev[iadev_count] = NULL;
3255         IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
3256         atm_dev_deregister(dev);
3257 
3258         iounmap(iadev->base);  
3259         pci_disable_device(pdev);
3260 
3261         ia_free_rx(iadev);
3262         ia_free_tx(iadev);
3263 
3264         kfree(iadev);
3265 }
3266 
3267 static const struct pci_device_id ia_pci_tbl[] = {
3268         { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3269         { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3270         { 0,}
3271 };
3272 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3273 
3274 static struct pci_driver ia_driver = {
3275         .name =         DEV_LABEL,
3276         .id_table =     ia_pci_tbl,
3277         .probe =        ia_init_one,
3278         .remove =       ia_remove_one,
3279 };
3280 
3281 static int __init ia_module_init(void)
3282 {
3283         int ret;
3284 
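             /* Arm the driver-wide periodic timer once the PCI driver has been
              * registered successfully. */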
3285         ret = pci_register_driver(&ia_driver);
3286         if (ret >= 0) {
3287                 ia_timer.expires = jiffies + 3*HZ;
3288                 add_timer(&ia_timer); 
3289         } else
3290                 printk(KERN_ERR DEV_LABEL ": no adapter found\n");  
3291         return ret;
3292 }
3293 
3294 static void __exit ia_module_exit(void)
3295 {
3296         pci_unregister_driver(&ia_driver);
3297 
3298         del_timer(&ia_timer);
3299 }
3300 
3301 module_init(ia_module_init);
3302 module_exit(ia_module_exit);
