root/drivers/net/wan/fsl_ucc_hdlc.c


DEFINITIONS

This source file includes the following definitions.
  1. uhdlc_init
  2. ucc_hdlc_tx
  3. hdlc_tx_restart
  4. hdlc_tx_done
  5. hdlc_rx_done
  6. ucc_hdlc_poll
  7. ucc_hdlc_irq_handler
  8. uhdlc_ioctl
  9. uhdlc_open
  10. uhdlc_memclean
  11. uhdlc_close
  12. ucc_hdlc_attach
  13. store_clk_config
  14. resume_clk_config
  15. uhdlc_suspend
  16. uhdlc_resume
  17. uhdlc_tx_timeout
  18. hdlc_map_iomem
  19. ucc_hdlc_probe
  20. ucc_hdlc_remove

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /* Freescale QUICC Engine HDLC Device Driver
   3  *
   4  * Copyright 2016 Freescale Semiconductor Inc.
   5  */
   6 
   7 #include <linux/delay.h>
   8 #include <linux/dma-mapping.h>
   9 #include <linux/hdlc.h>
  10 #include <linux/init.h>
  11 #include <linux/interrupt.h>
  12 #include <linux/io.h>
  13 #include <linux/irq.h>
  14 #include <linux/kernel.h>
  15 #include <linux/module.h>
  16 #include <linux/netdevice.h>
  17 #include <linux/of_address.h>
  18 #include <linux/of_irq.h>
  19 #include <linux/of_platform.h>
  20 #include <linux/platform_device.h>
  21 #include <linux/sched.h>
  22 #include <linux/skbuff.h>
  23 #include <linux/slab.h>
  24 #include <linux/spinlock.h>
  25 #include <linux/stddef.h>
  26 #include <soc/fsl/qe/qe_tdm.h>
  27 #include <uapi/linux/if_arp.h>
  28 
  29 #include "fsl_ucc_hdlc.h"
  30 
  31 #define DRV_DESC "Freescale QE UCC HDLC Driver"
  32 #define DRV_NAME "ucc_hdlc"
  33 
  34 #define TDM_PPPOHT_SLIC_MAXIN
  35 #define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
  36 
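/* Template configuration for one UCC running HDLC.  ucc_hdlc_probe()
 * copies this into utdm_info[] for each probed UCC and then overrides
 * the UCC number and clock sources from the device tree; uhdlc_init()
 * later adjusts the TSA/CTS fields for the selected mode.
 */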
  37 static struct ucc_tdm_info utdm_primary_info = {
  38         .uf_info = {
  39                 .tsa = 0,
  40                 .cdp = 0,
  41                 .cds = 1,
  42                 .ctsp = 1,
  43                 .ctss = 1,
  44                 .revd = 0,
  45                 .urfs = 256,
  46                 .utfs = 256,
  47                 .urfet = 128,
  48                 .urfset = 192,
  49                 .utfet = 128,
  50                 .utftt = 0x40,
  51                 .ufpt = 256,
  52                 .mode = UCC_FAST_PROTOCOL_MODE_HDLC,
  53                 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
  54                 .tenc = UCC_FAST_TX_ENCODING_NRZ,
  55                 .renc = UCC_FAST_RX_ENCODING_NRZ,
  56                 .tcrc = UCC_FAST_16_BIT_CRC,
  57                 .synl = UCC_FAST_SYNC_LEN_NOT_USED,
  58         },
  59 
  60         .si_info = {
  61 #ifdef TDM_PPPOHT_SLIC_MAXIN
  62                 .simr_rfsd = 1,
  63                 .simr_tfsd = 2,
  64 #else
  65                 .simr_rfsd = 0,
  66                 .simr_tfsd = 0,
  67 #endif
  68                 .simr_crt = 0,
  69                 .simr_sl = 0,
  70                 .simr_ce = 1,
  71                 .simr_fe = 1,
  72                 .simr_gm = 0,
  73         },
  74 };
  75 
  76 static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
  77 
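/* One-time channel setup: program the UCC for HDLC (plus the optional
 * hdlc-bus and loopback modes), allocate the Rx/Tx buffer descriptor
 * rings and their coherent data buffers, carve the parameter RAM out
 * of MURAM, and prime every BD so DMA can start as soon as the channel
 * is enabled.  Each allocation is unwound through the goto ladder at
 * the bottom on failure.
 */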
  78 static int uhdlc_init(struct ucc_hdlc_private *priv)
  79 {
  80         struct ucc_tdm_info *ut_info;
  81         struct ucc_fast_info *uf_info;
  82         u32 cecr_subblock;
  83         u16 bd_status;
  84         int ret, i;
  85         void *bd_buffer;
  86         dma_addr_t bd_dma_addr;
  87         u32 riptr;
  88         u32 tiptr;
  89         u32 gumr;
  90 
  91         ut_info = priv->ut_info;
  92         uf_info = &ut_info->uf_info;
  93 
  94         if (priv->tsa) {
  95                 uf_info->tsa = 1;
  96                 uf_info->ctsp = 1;
  97                 uf_info->cds = 1;
  98                 uf_info->ctss = 1;
  99         } else {
 100                 uf_info->cds = 0;
 101                 uf_info->ctsp = 0;
 102                 uf_info->ctss = 0;
 103         }
 104 
 105         /* This sets the HPM bit in the CMXUCR register, which configures
 106          * an open-drain connected HDLC bus
 107          */
 108         if (priv->hdlc_bus)
 109                 uf_info->brkpt_support = 1;
 110 
 111         uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
 112                                 UCC_HDLC_UCCE_TXB) << 16);
 113 
 114         ret = ucc_fast_init(uf_info, &priv->uccf);
 115         if (ret) {
 116                 dev_err(priv->dev, "Failed to init uccf\n");
 117                 return ret;
 118         }
 119 
 120         priv->uf_regs = priv->uccf->uf_regs;
 121         ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 122 
 123         /* Loopback mode */
 124         if (priv->loopback) {
 125                 dev_info(priv->dev, "Loopback Mode\n");
 126                 /* use the same clock when working in loopback */
 127                 qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
 128 
 129                 gumr = ioread32be(&priv->uf_regs->gumr);
 130                 gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
 131                          UCC_FAST_GUMR_TCI);
 132                 gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
 133                 iowrite32be(gumr, &priv->uf_regs->gumr);
 134         }
 135 
 136         /* Initialize SI */
 137         if (priv->tsa)
 138                 ucc_tdm_init(priv->utdm, priv->ut_info);
 139 
 140         /* Write to QE CECR, UCCx channel to Stop Transmission */
 141         cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 142         ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
 143                            QE_CR_PROTOCOL_UNSPECIFIED, 0);
 144 
 145         /* Set UPSMR to normal mode (needs fixing) */
 146         iowrite32be(0, &priv->uf_regs->upsmr);
 147 
 148         /* hdlc_bus mode */
 149         if (priv->hdlc_bus) {
 150                 u32 upsmr;
 151 
 152                 dev_info(priv->dev, "HDLC bus Mode\n");
 153                 upsmr = ioread32be(&priv->uf_regs->upsmr);
 154 
 155                 /* bus mode and retransmit enable, with collision window
 156                  * set to 8 bytes
 157                  */
 158                 upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
 159                                 UCC_HDLC_UPSMR_CW8;
 160                 iowrite32be(upsmr, &priv->uf_regs->upsmr);
 161 
 162                 /* explicitly disable CDS & CTSP */
 163                 gumr = ioread32be(&priv->uf_regs->gumr);
 164                 gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
 165                 /* set automatic sync to explicitly ignore CD signal */
 166                 gumr |= UCC_FAST_GUMR_SYNL_AUTO;
 167                 iowrite32be(gumr, &priv->uf_regs->gumr);
 168         }
 169 
 170         priv->rx_ring_size = RX_BD_RING_LEN;
 171         priv->tx_ring_size = TX_BD_RING_LEN;
 172         /* Alloc Rx BD */
 173         priv->rx_bd_base = dma_alloc_coherent(priv->dev,
 174                         RX_BD_RING_LEN * sizeof(struct qe_bd),
 175                         &priv->dma_rx_bd, GFP_KERNEL);
 176 
 177         if (!priv->rx_bd_base) {
 178                 dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
 179                 ret = -ENOMEM;
 180                 goto free_uccf;
 181         }
 182 
 183         /* Alloc Tx BD */
 184         priv->tx_bd_base = dma_alloc_coherent(priv->dev,
 185                         TX_BD_RING_LEN * sizeof(struct qe_bd),
 186                         &priv->dma_tx_bd, GFP_KERNEL);
 187 
 188         if (!priv->tx_bd_base) {
 189                 dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
 190                 ret = -ENOMEM;
 191                 goto free_rx_bd;
 192         }
 193 
 194         /* Alloc parameter ram for ucc hdlc */
 195         priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
 196                                 ALIGNMENT_OF_UCC_HDLC_PRAM);
 197 
 198         if (IS_ERR_VALUE(priv->ucc_pram_offset)) {
 199                 dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
 200                 ret = -ENOMEM;
 201                 goto free_tx_bd;
 202         }
 203 
 204         priv->rx_skbuff = kcalloc(priv->rx_ring_size,
 205                                   sizeof(*priv->rx_skbuff),
 206                                   GFP_KERNEL);
 207         if (!priv->rx_skbuff)
 208                 goto free_ucc_pram;
 209 
 210         priv->tx_skbuff = kcalloc(priv->tx_ring_size,
 211                                   sizeof(*priv->tx_skbuff),
 212                                   GFP_KERNEL);
 213         if (!priv->tx_skbuff)
 214                 goto free_rx_skbuff;
 215 
 216         priv->skb_curtx = 0;
 217         priv->skb_dirtytx = 0;
 218         priv->curtx_bd = priv->tx_bd_base;
 219         priv->dirty_tx = priv->tx_bd_base;
 220         priv->currx_bd = priv->rx_bd_base;
 221         priv->currx_bdnum = 0;
 222 
 223         /* init parameter base */
 224         cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 225         ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
 226                            QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
 227 
 228         priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
 229                                         qe_muram_addr(priv->ucc_pram_offset);
 230 
 231         /* Zero out parameter ram */
 232         memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
 233 
 234         /* Alloc riptr, tiptr */
 235         riptr = qe_muram_alloc(32, 32);
 236         if (IS_ERR_VALUE(riptr)) {
 237                 dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
 238                 ret = -ENOMEM;
 239                 goto free_tx_skbuff;
 240         }
 241 
 242         tiptr = qe_muram_alloc(32, 32);
 243         if (IS_ERR_VALUE(tiptr)) {
 244                 dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
 245                 ret = -ENOMEM;
 246                 goto free_riptr;
 247         }
 248         if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
 249                 dev_err(priv->dev, "MURAM allocation out of addressable range\n");
 250                 ret = -ENOMEM;
 251                 goto free_tiptr;
 252         }
 253 
 254         /* Set RIPTR, TIPTR */
 255         iowrite16be(riptr, &priv->ucc_pram->riptr);
 256         iowrite16be(tiptr, &priv->ucc_pram->tiptr);
 257 
 258         /* Set MRBLR */
 259         iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
 260 
 261         /* Set RBASE, TBASE */
 262         iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
 263         iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
 264 
 265         /* Set RSTATE, TSTATE */
 266         iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
 267         iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
 268 
 269         /* Set C_MASK, C_PRES for 16bit CRC */
 270         iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
 271         iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
 272 
 273         iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
 274         iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
 275         iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
 276         iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
 277         iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
 278         iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
 279         iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
 280         iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
 281 
 282         /* Get BD buffer */
 283         bd_buffer = dma_alloc_coherent(priv->dev,
 284                                        (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
 285                                        &bd_dma_addr, GFP_KERNEL);
 286 
 287         if (!bd_buffer) {
 288                 dev_err(priv->dev, "Could not allocate buffer descriptors\n");
 289                 ret = -ENOMEM;
 290                 goto free_tiptr;
 291         }
 292 
 293         priv->rx_buffer = bd_buffer;
 294         priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
 295 
 296         priv->dma_rx_addr = bd_dma_addr;
 297         priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
 298 
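        /* Layout of the single coherent allocation above: RX_BD_RING_LEN
         * slots of MAX_RX_BUF_LENGTH bytes for Rx, immediately followed by
         * TX_BD_RING_LEN slots of the same size for Tx.  The loops below aim
         * each BD's buf pointer at its slot; the last BD of each ring also
         * gets the wrap (W) bit so the controller cycles back to the base.
         */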
 299         for (i = 0; i < RX_BD_RING_LEN; i++) {
 300                 if (i < (RX_BD_RING_LEN - 1))
 301                         bd_status = R_E_S | R_I_S;
 302                 else
 303                         bd_status = R_E_S | R_I_S | R_W_S;
 304 
 305                 iowrite16be(bd_status, &priv->rx_bd_base[i].status);
 306                 iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
 307                             &priv->rx_bd_base[i].buf);
 308         }
 309 
 310         for (i = 0; i < TX_BD_RING_LEN; i++) {
 311                 if (i < (TX_BD_RING_LEN - 1))
 312                         bd_status =  T_I_S | T_TC_S;
 313                 else
 314                         bd_status =  T_I_S | T_TC_S | T_W_S;
 315 
 316                 iowrite16be(bd_status, &priv->tx_bd_base[i].status);
 317                 iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
 318                             &priv->tx_bd_base[i].buf);
 319         }
 320 
 321         return 0;
 322 
 323 free_tiptr:
 324         qe_muram_free(tiptr);
 325 free_riptr:
 326         qe_muram_free(riptr);
 327 free_tx_skbuff:
 328         kfree(priv->tx_skbuff);
 329 free_rx_skbuff:
 330         kfree(priv->rx_skbuff);
 331 free_ucc_pram:
 332         qe_muram_free(priv->ucc_pram_offset);
 333 free_tx_bd:
 334         dma_free_coherent(priv->dev,
 335                           TX_BD_RING_LEN * sizeof(struct qe_bd),
 336                           priv->tx_bd_base, priv->dma_tx_bd);
 337 free_rx_bd:
 338         dma_free_coherent(priv->dev,
 339                           RX_BD_RING_LEN * sizeof(struct qe_bd),
 340                           priv->rx_bd_base, priv->dma_rx_bd);
 341 free_uccf:
 342         ucc_fast_free(priv->uccf);
 343 
 344         return ret;
 345 }
 346 
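/* Transmit path, installed as hdlc->xmit: validate or prepend the
 * protocol header according to dev->type, copy the skb into this BD's
 * slot of the coherent Tx buffer (no streaming DMA mapping is used),
 * hand the BD to the QE by setting the Ready bit, and stop the queue
 * once the ring catches up with the dirty (unconfirmed) pointer.
 */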
 347 static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
 348 {
 349         hdlc_device *hdlc = dev_to_hdlc(dev);
 350         struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
 351         struct qe_bd __iomem *bd;
 352         u16 bd_status;
 353         unsigned long flags;
 354         u16 *proto_head;
 355 
 356         switch (dev->type) {
 357         case ARPHRD_RAWHDLC:
 358                 if (skb_headroom(skb) < HDLC_HEAD_LEN) {
 359                         dev->stats.tx_dropped++;
 360                         dev_kfree_skb(skb);
 361                         netdev_err(dev, "Not enough space for hdlc head\n");
 362                         return -ENOMEM;
 363                 }
 364 
 365                 skb_push(skb, HDLC_HEAD_LEN);
 366 
 367                 proto_head = (u16 *)skb->data;
 368                 *proto_head = htons(DEFAULT_HDLC_HEAD);
 369 
 370                 dev->stats.tx_bytes += skb->len;
 371                 break;
 372 
 373         case ARPHRD_PPP:
 374                 proto_head = (u16 *)skb->data;
 375                 if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
 376                         dev->stats.tx_dropped++;
 377                         dev_kfree_skb(skb);
 378                         netdev_err(dev, "Wrong PPP header\n");
 379                         return -ENOMEM;
 380                 }
 381 
 382                 dev->stats.tx_bytes += skb->len;
 383                 break;
 384 
 385         case ARPHRD_ETHER:
 386                 dev->stats.tx_bytes += skb->len;
 387                 break;
 388 
 389         default:
 390                 dev->stats.tx_dropped++;
 391                 dev_kfree_skb(skb);
 392                 return -ENOMEM;
 393         }
 394         netdev_sent_queue(dev, skb->len);
 395         spin_lock_irqsave(&priv->lock, flags);
 396 
 397         /* Start from the next BD that should be filled */
 398         bd = priv->curtx_bd;
 399         bd_status = ioread16be(&bd->status);
 400         /* Save the skb pointer so we can free it later */
 401         priv->tx_skbuff[priv->skb_curtx] = skb;
 402 
 403         /* Update the current skb pointer (wrapping if this was the last) */
 404         priv->skb_curtx =
 405             (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
 406 
 407         /* copy skb data to tx buffer for sdma processing */
 408         memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
 409                skb->data, skb->len);
 410 
 411         /* set bd status and length */
 412         bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
 413 
 414         iowrite16be(skb->len, &bd->length);
 415         iowrite16be(bd_status, &bd->status);
 416 
 417         /* Move to next BD in the ring */
 418         if (!(bd_status & T_W_S))
 419                 bd += 1;
 420         else
 421                 bd = priv->tx_bd_base;
 422 
 423         if (bd == priv->dirty_tx) {
 424                 if (!netif_queue_stopped(dev))
 425                         netif_stop_queue(dev);
 426         }
 427 
 428         priv->curtx_bd = bd;
 429 
 430         spin_unlock_irqrestore(&priv->lock, flags);
 431 
 432         return NETDEV_TX_OK;
 433 }
 434 
 435 static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
 436 {
 437         u32 cecr_subblock;
 438 
 439         cecr_subblock =
 440                 ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);
 441 
 442         qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
 443                      QE_CR_PROTOCOL_UNSPECIFIED, 0);
 444         return 0;
 445 }
 446 
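/* Reclaim completed Tx BDs (called under priv->lock from the NAPI
 * poll): underrun and carrier-loss conditions are counted and trigger
 * a QE_RESTART_TX, transmitted skbs are freed, the queue is woken, and
 * the totals are reported to byte queue limits via
 * netdev_completed_queue().
 */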
 447 static int hdlc_tx_done(struct ucc_hdlc_private *priv)
 448 {
 449         /* Start from the next BD that should be filled */
 450         struct net_device *dev = priv->ndev;
 451         unsigned int bytes_sent = 0;
 452         int howmany = 0;
 453         struct qe_bd *bd;               /* BD pointer */
 454         u16 bd_status;
 455         int tx_restart = 0;
 456 
 457         bd = priv->dirty_tx;
 458         bd_status = ioread16be(&bd->status);
 459 
 460         /* Normal processing. */
 461         while ((bd_status & T_R_S) == 0) {
 462                 struct sk_buff *skb;
 463 
 464                 if (bd_status & T_UN_S) { /* Underrun */
 465                         dev->stats.tx_fifo_errors++;
 466                         tx_restart = 1;
 467                 }
 468                 if (bd_status & T_CT_S) { /* Carrier lost */
 469                         dev->stats.tx_carrier_errors++;
 470                         tx_restart = 1;
 471                 }
 472 
 473                 /* BD contains already transmitted buffer.   */
 474                 /* Handle the transmitted buffer and release */
 475                 /* the BD to be used with the current frame  */
 476 
 477                 skb = priv->tx_skbuff[priv->skb_dirtytx];
 478                 if (!skb)
 479                         break;
 480                 howmany++;
 481                 bytes_sent += skb->len;
 482                 dev->stats.tx_packets++;
 483                 memset(priv->tx_buffer +
 484                        (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
 485                        0, skb->len);
 486                 dev_consume_skb_irq(skb);
 487 
 488                 priv->tx_skbuff[priv->skb_dirtytx] = NULL;
 489                 priv->skb_dirtytx =
 490                     (priv->skb_dirtytx +
 491                      1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
 492 
 493                 /* We freed a buffer, so now we can restart transmission */
 494                 if (netif_queue_stopped(dev))
 495                         netif_wake_queue(dev);
 496 
 497                 /* Advance the confirmation BD pointer */
 498                 if (!(bd_status & T_W_S))
 499                         bd += 1;
 500                 else
 501                         bd = priv->tx_bd_base;
 502                 bd_status = ioread16be(&bd->status);
 503         }
 504         priv->dirty_tx = bd;
 505 
 506         if (tx_restart)
 507                 hdlc_tx_restart(priv);
 508 
 509         netdev_completed_queue(dev, howmany, bytes_sent);
 510         return 0;
 511 }
 512 
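/* NAPI Rx: walk the ring until a BD is still empty (R_E_S) or the
 * budget runs out.  Error frames only bump statistics; good frames are
 * copied from the coherent Rx buffer into a fresh skb, stripping the
 * HDLC head and/or CRC according to dev->type.  Note the switch below
 * has no default case: it relies on dev->type being one of the three
 * ARPHRD values this driver supports, otherwise skb stays NULL and the
 * statistics update after the switch would dereference a NULL pointer.
 * Each BD is recycled by re-arming R_E_S.
 */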
 513 static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
 514 {
 515         struct net_device *dev = priv->ndev;
 516         struct sk_buff *skb = NULL;
 517         hdlc_device *hdlc = dev_to_hdlc(dev);
 518         struct qe_bd *bd;
 519         u16 bd_status;
 520         u16 length, howmany = 0;
 521         u8 *bdbuffer;
 522 
 523         bd = priv->currx_bd;
 524         bd_status = ioread16be(&bd->status);
 525 
 526         /* while there are received buffers and BD is full (~R_E) */
 527         while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
 528                 if (bd_status & (RX_BD_ERRORS)) {
 529                         dev->stats.rx_errors++;
 530 
 531                         if (bd_status & R_CD_S)
 532                                 dev->stats.collisions++;
 533                         if (bd_status & R_OV_S)
 534                                 dev->stats.rx_fifo_errors++;
 535                         if (bd_status & R_CR_S)
 536                                 dev->stats.rx_crc_errors++;
 537                         if (bd_status & R_AB_S)
 538                                 dev->stats.rx_over_errors++;
 539                         if (bd_status & R_NO_S)
 540                                 dev->stats.rx_frame_errors++;
 541                         if (bd_status & R_LG_S)
 542                                 dev->stats.rx_length_errors++;
 543 
 544                         goto recycle;
 545                 }
 546                 bdbuffer = priv->rx_buffer +
 547                         (priv->currx_bdnum * MAX_RX_BUF_LENGTH);
 548                 length = ioread16be(&bd->length);
 549 
 550                 switch (dev->type) {
 551                 case ARPHRD_RAWHDLC:
 552                         bdbuffer += HDLC_HEAD_LEN;
 553                         length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
 554 
 555                         skb = dev_alloc_skb(length);
 556                         if (!skb) {
 557                                 dev->stats.rx_dropped++;
 558                                 return -ENOMEM;
 559                         }
 560 
 561                         skb_put(skb, length);
 562                         skb->len = length;
 563                         skb->dev = dev;
 564                         memcpy(skb->data, bdbuffer, length);
 565                         break;
 566 
 567                 case ARPHRD_PPP:
 568                 case ARPHRD_ETHER:
 569                         length -= HDLC_CRC_SIZE;
 570 
 571                         skb = dev_alloc_skb(length);
 572                         if (!skb) {
 573                                 dev->stats.rx_dropped++;
 574                                 return -ENOMEM;
 575                         }
 576 
 577                         skb_put(skb, length);
 578                         skb->len = length;
 579                         skb->dev = dev;
 580                         memcpy(skb->data, bdbuffer, length);
 581                         break;
 582                 }
 583 
 584                 dev->stats.rx_packets++;
 585                 dev->stats.rx_bytes += skb->len;
 586                 howmany++;
 587                 if (hdlc->proto)
 588                         skb->protocol = hdlc_type_trans(skb, dev);
 589                 netif_receive_skb(skb);
 590 
 591 recycle:
 592                 iowrite16be((bd_status & R_W_S) | R_E_S | R_I_S, &bd->status);
 593 
 594                 /* update to point at the next bd */
 595                 if (bd_status & R_W_S) {
 596                         priv->currx_bdnum = 0;
 597                         bd = priv->rx_bd_base;
 598                 } else {
 599                         if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
 600                                 priv->currx_bdnum += 1;
 601                         else
 602                                 priv->currx_bdnum = RX_BD_RING_LEN - 1;
 603 
 604                         bd += 1;
 605                 }
 606 
 607                 bd_status = ioread16be(&bd->status);
 608         }
 609 
 610         priv->currx_bd = bd;
 611         return howmany;
 612 }
 613 
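/* NAPI poll: Tx completions are always reaped, Rx is bounded by the
 * budget.  When the budget is not exhausted, completion re-enables the
 * Rx/Tx event interrupts that the IRQ handler masked off in UCCM.
 */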
 614 static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
 615 {
 616         struct ucc_hdlc_private *priv = container_of(napi,
 617                                                      struct ucc_hdlc_private,
 618                                                      napi);
 619         int howmany;
 620 
 621         /* Tx event processing */
 622         spin_lock(&priv->lock);
 623         hdlc_tx_done(priv);
 624         spin_unlock(&priv->lock);
 625 
 626         howmany = 0;
 627         howmany += hdlc_rx_done(priv, budget - howmany);
 628 
 629         if (howmany < budget) {
 630                 napi_complete_done(napi, howmany);
 631                 qe_setbits32(priv->uccf->p_uccm,
 632                              (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
 633         }
 634 
 635         return howmany;
 636 }
 637 
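/* Interrupt top half: events are acknowledged by writing them back to
 * UCCE, the Rx/Tx event sources are masked in UCCM, and the real work
 * is deferred to NAPI.  Busy (Rx overrun) and Tx exception events are
 * only counted here.
 */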
 638 static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
 639 {
 640         struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
 641         struct net_device *dev = priv->ndev;
 642         struct ucc_fast_private *uccf;
 643         struct ucc_tdm_info *ut_info;
 644         u32 ucce;
 645         u32 uccm;
 646 
 647         ut_info = priv->ut_info;
 648         uccf = priv->uccf;
 649 
 650         ucce = ioread32be(uccf->p_ucce);
 651         uccm = ioread32be(uccf->p_uccm);
 652         ucce &= uccm;
 653         iowrite32be(ucce, uccf->p_ucce);
 654         if (!ucce)
 655                 return IRQ_NONE;
 656 
 657         if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
 658                 if (napi_schedule_prep(&priv->napi)) {
 659                         uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
 660                                   << 16);
 661                         iowrite32be(uccm, uccf->p_uccm);
 662                         __napi_schedule(&priv->napi);
 663                 }
 664         }
 665 
 666         /* Errors and other events */
 667         if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
 668                 dev->stats.rx_missed_errors++;
 669         if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
 670                 dev->stats.tx_errors++;
 671 
 672         return IRQ_HANDLED;
 673 }
 674 
 675 static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 676 {
 677         const size_t size = sizeof(te1_settings);
 678         te1_settings line;
 679         struct ucc_hdlc_private *priv = netdev_priv(dev);
 680 
 681         if (cmd != SIOCWANDEV)
 682                 return hdlc_ioctl(dev, ifr, cmd);
 683 
 684         switch (ifr->ifr_settings.type) {
 685         case IF_GET_IFACE:
 686                 ifr->ifr_settings.type = IF_IFACE_E1;
 687                 if (ifr->ifr_settings.size < size) {
 688                         ifr->ifr_settings.size = size; /* data size wanted */
 689                         return -ENOBUFS;
 690                 }
 691                 memset(&line, 0, sizeof(line));
 692                 line.clock_type = priv->clocking;
 693 
 694                 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
 695                         return -EFAULT;
 696                 return 0;
 697 
 698         default:
 699                 return hdlc_ioctl(dev, ifr, cmd);
 700         }
 701 }
 702 
 703 static int uhdlc_open(struct net_device *dev)
 704 {
 705         u32 cecr_subblock;
 706         hdlc_device *hdlc = dev_to_hdlc(dev);
 707         struct ucc_hdlc_private *priv = hdlc->priv;
 708         struct ucc_tdm *utdm = priv->utdm;
 709 
 710         if (priv->hdlc_busy != 1) {
 711                 if (request_irq(priv->ut_info->uf_info.irq,
 712                                 ucc_hdlc_irq_handler, 0, "hdlc", priv))
 713                         return -ENODEV;
 714 
 715                 cecr_subblock = ucc_fast_get_qe_cr_subblock(
 716                                         priv->ut_info->uf_info.ucc_num);
 717 
 718                 qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
 719                              QE_CR_PROTOCOL_UNSPECIFIED, 0);
 720 
 721                 ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 722 
 723                 /* Enable the TDM port */
 724                 if (priv->tsa)
 725                         utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
 726 
 727                 priv->hdlc_busy = 1;
 728                 netif_device_attach(priv->ndev);
 729                 napi_enable(&priv->napi);
 730                 netdev_reset_queue(dev);
 731                 netif_start_queue(dev);
 732                 hdlc_open(dev);
 733         }
 734 
 735         return 0;
 736 }
 737 
 738 static void uhdlc_memclean(struct ucc_hdlc_private *priv)
 739 {
 740         qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
 741         qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
 742 
 743         if (priv->rx_bd_base) {
 744                 dma_free_coherent(priv->dev,
 745                                   RX_BD_RING_LEN * sizeof(struct qe_bd),
 746                                   priv->rx_bd_base, priv->dma_rx_bd);
 747 
 748                 priv->rx_bd_base = NULL;
 749                 priv->dma_rx_bd = 0;
 750         }
 751 
 752         if (priv->tx_bd_base) {
 753                 dma_free_coherent(priv->dev,
 754                                   TX_BD_RING_LEN * sizeof(struct qe_bd),
 755                                   priv->tx_bd_base, priv->dma_tx_bd);
 756 
 757                 priv->tx_bd_base = NULL;
 758                 priv->dma_tx_bd = 0;
 759         }
 760 
 761         if (priv->ucc_pram) {
 762                 qe_muram_free(priv->ucc_pram_offset);
 763                 priv->ucc_pram = NULL;
 764                 priv->ucc_pram_offset = 0;
 765         }
 766 
 767         kfree(priv->rx_skbuff);
 768         priv->rx_skbuff = NULL;
 769 
 770         kfree(priv->tx_skbuff);
 771         priv->tx_skbuff = NULL;
 772 
 773         if (priv->uf_regs) {
 774                 iounmap(priv->uf_regs);
 775                 priv->uf_regs = NULL;
 776         }
 777 
 778         if (priv->uccf) {
 779                 ucc_fast_free(priv->uccf);
 780                 priv->uccf = NULL;
 781         }
 782 
 783         if (priv->rx_buffer) {
 784                 dma_free_coherent(priv->dev,
 785                                   RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
 786                                   priv->rx_buffer, priv->dma_rx_addr);
 787                 priv->rx_buffer = NULL;
 788                 priv->dma_rx_addr = 0;
 789         }
 790 
 791         if (priv->tx_buffer) {
 792                 dma_free_coherent(priv->dev,
 793                                   TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
 794                                   priv->tx_buffer, priv->dma_tx_addr);
 795                 priv->tx_buffer = NULL;
 796                 priv->dma_tx_addr = 0;
 797         }
 798 }
 799 
 800 static int uhdlc_close(struct net_device *dev)
 801 {
 802         struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
 803         struct ucc_tdm *utdm = priv->utdm;
 804         u32 cecr_subblock;
 805 
 806         napi_disable(&priv->napi);
 807         cecr_subblock = ucc_fast_get_qe_cr_subblock(
 808                                 priv->ut_info->uf_info.ucc_num);
 809 
 810         qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
 811                      (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
 812         qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
 813                      (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
 814 
 815         if (priv->tsa)
 816                 utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port);
 817 
 818         ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 819 
 820         free_irq(priv->ut_info->uf_info.irq, priv);
 821         netif_stop_queue(dev);
 822         netdev_reset_queue(dev);
 823         priv->hdlc_busy = 0;
 824 
 825         return 0;
 826 }
 827 
 828 static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
 829                            unsigned short parity)
 830 {
 831         struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
 832 
 833         if (encoding != ENCODING_NRZ &&
 834             encoding != ENCODING_NRZI)
 835                 return -EINVAL;
 836 
 837         if (parity != PARITY_NONE &&
 838             parity != PARITY_CRC32_PR1_CCITT &&
 839             parity != PARITY_CRC16_PR0_CCITT &&
 840             parity != PARITY_CRC16_PR1_CCITT)
 841                 return -EINVAL;
 842 
 843         priv->encoding = encoding;
 844         priv->parity = parity;
 845 
 846         return 0;
 847 }
 848 
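/* For orientation, a typical userspace bring-up sequence (illustrative
 * only: sethdlc(8) is the generic-HDLC configuration tool, and the
 * interface name depends on registration order):
 *
 *	sethdlc hdlc0 ppp
 *	ip link set hdlc0 up
 */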
 849 #ifdef CONFIG_PM
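/* Power management: suspend snapshots the clock mux (CMX) routing,
 * GUMR/GUEMR and the HDLC parameter RAM, on the assumption implicit in
 * this driver that the QE loses register and MURAM state across sleep;
 * resume replays them and rebuilds the SI RAM and both BD rings.
 */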
 850 static void store_clk_config(struct ucc_hdlc_private *priv)
 851 {
 852         struct qe_mux *qe_mux_reg = &qe_immr->qmx;
 853 
 854         /* store si clk */
 855         priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
 856         priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
 857 
 858         /* store si sync */
 859         priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
 860 
 861         /* store ucc clk */
 862         memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
 863 }
 864 
 865 static void resume_clk_config(struct ucc_hdlc_private *priv)
 866 {
 867         struct qe_mux *qe_mux_reg = &qe_immr->qmx;
 868 
 869         memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
 870 
 871         iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
 872         iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
 873 
 874         iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
 875 }
 876 
 877 static int uhdlc_suspend(struct device *dev)
 878 {
 879         struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
 880         struct ucc_tdm_info *ut_info;
 881         struct ucc_fast __iomem *uf_regs;
 882 
 883         if (!priv)
 884                 return -EINVAL;
 885 
 886         if (!netif_running(priv->ndev))
 887                 return 0;
 888 
 889         netif_device_detach(priv->ndev);
 890         napi_disable(&priv->napi);
 891 
 892         ut_info = priv->ut_info;
 893         uf_regs = priv->uf_regs;
 894 
 895         /* backup gumr guemr*/
 896         priv->gumr = ioread32be(&uf_regs->gumr);
 897         priv->guemr = ioread8(&uf_regs->guemr);
 898 
 899         priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
 900                                         GFP_KERNEL);
 901         if (!priv->ucc_pram_bak)
 902                 return -ENOMEM;
 903 
 904         /* backup HDLC parameter */
 905         memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
 906                       sizeof(struct ucc_hdlc_param));
 907 
 908         /* store the clk configuration */
 909         store_clk_config(priv);
 910 
 911         /* save power */
 912         ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 913 
 914         return 0;
 915 }
 916 
 917 static int uhdlc_resume(struct device *dev)
 918 {
 919         struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
 920         struct ucc_tdm *utdm;
 921         struct ucc_tdm_info *ut_info;
 922         struct ucc_fast __iomem *uf_regs;
 923         struct ucc_fast_private *uccf;
 924         struct ucc_fast_info *uf_info;
 925         int ret, i;
 926         u32 cecr_subblock;
 927         u16 bd_status;
 928 
 929         if (!priv)
 930                 return -EINVAL;
 931 
 932         if (!netif_running(priv->ndev))
 933                 return 0;
 934 
 935         utdm = priv->utdm;
 936         ut_info = priv->ut_info;
 937         uf_info = &ut_info->uf_info;
 938         uf_regs = priv->uf_regs;
 939         uccf = priv->uccf;
 940 
 941         /* restore gumr guemr */
 942         iowrite8(priv->guemr, &uf_regs->guemr);
 943         iowrite32be(priv->gumr, &uf_regs->gumr);
 944 
 945         /* Set Virtual Fifo registers */
 946         iowrite16be(uf_info->urfs, &uf_regs->urfs);
 947         iowrite16be(uf_info->urfet, &uf_regs->urfet);
 948         iowrite16be(uf_info->urfset, &uf_regs->urfset);
 949         iowrite16be(uf_info->utfs, &uf_regs->utfs);
 950         iowrite16be(uf_info->utfet, &uf_regs->utfet);
 951         iowrite16be(uf_info->utftt, &uf_regs->utftt);
 952         /* utfb, urfb are offsets from MURAM base */
 953         iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
 954         iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
 955 
 956         /* Rx Tx and sync clock routing */
 957         resume_clk_config(priv);
 958 
 959         iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
 960         iowrite32be(0xffffffff, &uf_regs->ucce);
 961 
 962         ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
 963 
 964         /* rebuild SIRAM */
 965         if (priv->tsa)
 966                 ucc_tdm_init(priv->utdm, priv->ut_info);
 967 
 968         /* Write to QE CECR, UCCx channel to Stop Transmission */
 969         cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 970         ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
 971                            (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
 972 
 973         /* Set UPSMR normal mode */
 974         iowrite32be(0, &uf_regs->upsmr);
 975 
 976         /* init parameter base */
 977         cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
 978         ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
 979                            QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
 980 
 981         priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
 982                                 qe_muram_addr(priv->ucc_pram_offset);
 983 
 984         /* restore ucc parameter */
 985         memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
 986                     sizeof(struct ucc_hdlc_param));
 987         kfree(priv->ucc_pram_bak);
 988 
 989         /* rebuild BD entry */
 990         for (i = 0; i < RX_BD_RING_LEN; i++) {
 991                 if (i < (RX_BD_RING_LEN - 1))
 992                         bd_status = R_E_S | R_I_S;
 993                 else
 994                         bd_status = R_E_S | R_I_S | R_W_S;
 995 
 996                 iowrite16be(bd_status, &priv->rx_bd_base[i].status);
 997                 iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH,
 998                             &priv->rx_bd_base[i].buf);
 999         }
1000 
1001         for (i = 0; i < TX_BD_RING_LEN; i++) {
1002                 if (i < (TX_BD_RING_LEN - 1))
1003                         bd_status =  T_I_S | T_TC_S;
1004                 else
1005                         bd_status =  T_I_S | T_TC_S | T_W_S;
1006 
1007                 iowrite16be(bd_status, &priv->tx_bd_base[i].status);
1008                 iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH,
1009                             &priv->tx_bd_base[i].buf);
1010         }
1011 
1012         /* if hdlc is busy enable TX and RX */
1013         if (priv->hdlc_busy == 1) {
1014                 cecr_subblock = ucc_fast_get_qe_cr_subblock(
1015                                         priv->ut_info->uf_info.ucc_num);
1016 
1017                 qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
1018                              (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
1019 
1020                 ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
1021 
1022                 /* Enable the TDM port */
1023                 if (priv->tsa)
1024                         utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port);
1025         }
1026 
1027         napi_enable(&priv->napi);
1028         netif_device_attach(priv->ndev);
1029 
1030         return 0;
1031 }
1032 
1033 static const struct dev_pm_ops uhdlc_pm_ops = {
1034         .suspend = uhdlc_suspend,
1035         .resume = uhdlc_resume,
1036         .freeze = uhdlc_suspend,
1037         .thaw = uhdlc_resume,
1038 };
1039 
1040 #define HDLC_PM_OPS (&uhdlc_pm_ops)
1041 
1042 #else
1043 
1044 #define HDLC_PM_OPS NULL
1045 
1046 #endif
1047 static void uhdlc_tx_timeout(struct net_device *ndev)
1048 {
1049         netdev_err(ndev, "%s\n", __func__);
1050 }
1051 
1052 static const struct net_device_ops uhdlc_ops = {
1053         .ndo_open       = uhdlc_open,
1054         .ndo_stop       = uhdlc_close,
1055         .ndo_start_xmit = hdlc_start_xmit,
1056         .ndo_do_ioctl   = uhdlc_ioctl,
1057         .ndo_tx_timeout = uhdlc_tx_timeout,
1058 };
1059 
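/* Look up the platform device matching a compatible string and ioremap
 * its first MEM resource.  The SI RAM mapping additionally gets zeroed
 * exactly once across all probed devices, tracked by the static
 * siram_init_flag.
 */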
1060 static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
1061 {
1062         struct device_node *np;
1063         struct platform_device *pdev;
1064         struct resource *res;
1065         static int siram_init_flag;
1066         int ret = 0;
1067 
1068         np = of_find_compatible_node(NULL, NULL, name);
1069         if (!np)
1070                 return -EINVAL;
1071 
1072         pdev = of_find_device_by_node(np);
1073         if (!pdev) {
1074                 pr_err("%pOFn: failed to lookup pdev\n", np);
1075                 of_node_put(np);
1076                 return -EINVAL;
1077         }
1078 
1079         of_node_put(np);
1080         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1081         if (!res) {
1082                 ret = -EINVAL;
1083                 goto error_put_device;
1084         }
1085         *ptr = ioremap(res->start, resource_size(res));
1086         if (!*ptr) {
1087                 ret = -ENOMEM;
1088                 goto error_put_device;
1089         }
1090 
1091         /* We've remapped the addresses, and we don't need the device any
1092          * more, so we should release it.
1093          */
1094         put_device(&pdev->dev);
1095 
1096         if (init_flag && siram_init_flag == 0) {
1097                 memset_io(*ptr, 0, resource_size(res));
1098                 siram_init_flag = 1;
1099         }
1100         return  0;
1101 
1102 error_put_device:
1103         put_device(&pdev->dev);
1104 
1105         return ret;
1106 }
1107 
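/* Probe: translate the device tree node (cell-index, clock names, the
 * TDM/loopback/hdlc-bus flags and the optional fsl,hmask) into a
 * ucc_tdm_info, map the SI and SI RAM blocks when a TDM interface is
 * used, run uhdlc_init() and register the result with the generic HDLC
 * layer.
 */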
1108 static int ucc_hdlc_probe(struct platform_device *pdev)
1109 {
1110         struct device_node *np = pdev->dev.of_node;
1111         struct ucc_hdlc_private *uhdlc_priv = NULL;
1112         struct ucc_tdm_info *ut_info;
1113         struct ucc_tdm *utdm = NULL;
1114         struct resource res;
1115         struct net_device *dev;
1116         hdlc_device *hdlc;
1117         int ucc_num;
1118         const char *sprop;
1119         int ret;
1120         u32 val;
1121 
1122         ret = of_property_read_u32_index(np, "cell-index", 0, &val);
1123         if (ret) {
1124                 dev_err(&pdev->dev, "Invalid ucc property\n");
1125                 return -ENODEV;
1126         }
1127 
1128         ucc_num = val - 1;
1129         if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
1130                 dev_err(&pdev->dev, "Invalid UCC num\n");
1131                 return -EINVAL;
1132         }
1133 
1134         memcpy(&utdm_info[ucc_num], &utdm_primary_info,
1135                sizeof(utdm_primary_info));
1136 
1137         ut_info = &utdm_info[ucc_num];
1138         ut_info->uf_info.ucc_num = ucc_num;
1139 
1140         sprop = of_get_property(np, "rx-clock-name", NULL);
1141         if (sprop) {
1142                 ut_info->uf_info.rx_clock = qe_clock_source(sprop);
1143                 if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
1144                     (ut_info->uf_info.rx_clock > QE_CLK24)) {
1145                         dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1146                         return -EINVAL;
1147                 }
1148         } else {
1149                 dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
1150                 return -EINVAL;
1151         }
1152 
1153         sprop = of_get_property(np, "tx-clock-name", NULL);
1154         if (sprop) {
1155                 ut_info->uf_info.tx_clock = qe_clock_source(sprop);
1156                 if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
1157                     (ut_info->uf_info.tx_clock > QE_CLK24)) {
1158                         dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1159                         return -EINVAL;
1160                 }
1161         } else {
1162                 dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
1163                 return -EINVAL;
1164         }
1165 
1166         ret = of_address_to_resource(np, 0, &res);
1167         if (ret)
1168                 return -EINVAL;
1169 
1170         ut_info->uf_info.regs = res.start;
1171         ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
1172 
1173         uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
1174         if (!uhdlc_priv)
1175                 return -ENOMEM;
1177 
1178         dev_set_drvdata(&pdev->dev, uhdlc_priv);
1179         uhdlc_priv->dev = &pdev->dev;
1180         uhdlc_priv->ut_info = ut_info;
1181 
1182         if (of_get_property(np, "fsl,tdm-interface", NULL))
1183                 uhdlc_priv->tsa = 1;
1184 
1185         if (of_get_property(np, "fsl,ucc-internal-loopback", NULL))
1186                 uhdlc_priv->loopback = 1;
1187 
1188         if (of_get_property(np, "fsl,hdlc-bus", NULL))
1189                 uhdlc_priv->hdlc_bus = 1;
1190 
1191         if (uhdlc_priv->tsa == 1) {
1192                 utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
1193                 if (!utdm) {
1194                         ret = -ENOMEM;
1195                         dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
1196                         goto free_uhdlc_priv;
1197                 }
1198                 uhdlc_priv->utdm = utdm;
1199                 ret = ucc_of_parse_tdm(np, utdm, ut_info);
1200                 if (ret)
1201                         goto free_utdm;
1202 
1203                 ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
1204                                      (void __iomem **)&utdm->si_regs);
1205                 if (ret)
1206                         goto free_utdm;
1207                 ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
1208                                      (void __iomem **)&utdm->siram);
1209                 if (ret)
1210                         goto unmap_si_regs;
1211         }
1212 
1213         if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
1214                 uhdlc_priv->hmask = DEFAULT_ADDR_MASK;
1215 
1216         ret = uhdlc_init(uhdlc_priv);
1217         if (ret) {
1218                 dev_err(&pdev->dev, "Failed to init uhdlc\n");
1219                 goto undo_uhdlc_init;
1220         }
1221 
1222         dev = alloc_hdlcdev(uhdlc_priv);
1223         if (!dev) {
1224                 ret = -ENOMEM;
1225                 pr_err("ucc_hdlc: unable to allocate memory\n");
1226                 goto undo_uhdlc_init;
1227         }
1228 
1229         uhdlc_priv->ndev = dev;
1230         hdlc = dev_to_hdlc(dev);
1231         dev->tx_queue_len = 16;
1232         dev->netdev_ops = &uhdlc_ops;
1233         dev->watchdog_timeo = 2 * HZ;
1234         hdlc->attach = ucc_hdlc_attach;
1235         hdlc->xmit = ucc_hdlc_tx;
1236         netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
1237         if (register_hdlc_device(dev)) {
1238                 ret = -ENOBUFS;
1239                 pr_err("ucc_hdlc: unable to register hdlc device\n");
1240                 goto free_dev;
1241         }
1242 
1243         return 0;
1244 
1245 free_dev:
1246         free_netdev(dev);
1247 undo_uhdlc_init:
1248         if (utdm) iounmap(utdm->siram); /* utdm is NULL when !tsa */
1249 unmap_si_regs:
1250         if (utdm) iounmap(utdm->si_regs);
1251 free_utdm:
1252         if (uhdlc_priv->tsa)
1253                 kfree(utdm);
1254 free_uhdlc_priv:
1255         kfree(uhdlc_priv);
1256         return ret;
1257 }
1258 
1259 static int ucc_hdlc_remove(struct platform_device *pdev)
1260 {
1261         struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
1262 
1263         uhdlc_memclean(priv);
1264 
1265         if (priv->utdm && priv->utdm->si_regs) {
1266                 iounmap(priv->utdm->si_regs);
1267                 priv->utdm->si_regs = NULL;
1268         }
1269 
1270         if (priv->utdm && priv->utdm->siram) {
1271                 iounmap(priv->utdm->siram);
1272                 priv->utdm->siram = NULL;
1273         }
1274         kfree(priv);
1275 
1276         dev_info(&pdev->dev, "UCC based hdlc module removed\n");
1277 
1278         return 0;
1279 }
1280 
1281 static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
1282         {
1283         .compatible = "fsl,ucc-hdlc",
1284         },
1285         {},
1286 };
1287 
1288 MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
1289 
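/* Illustrative device tree node, inferred from the properties read in
 * ucc_hdlc_probe() above; the unit address, reg/interrupts values and
 * clock names are placeholders, so consult the platform's real
 * bindings before copying this:
 *
 *	ucc@2000 {
 *		compatible = "fsl,ucc-hdlc";
 *		reg = <0x2000 0x200>;
 *		interrupts = <32>;
 *		cell-index = <1>;
 *		rx-clock-name = "brg1";
 *		tx-clock-name = "brg1";
 *	};
 *
 * fsl,tdm-interface, fsl,ucc-internal-loopback, fsl,hdlc-bus and
 * fsl,hmask are the optional switches tested in the probe path; a TDM
 * interface additionally needs the properties parsed by
 * ucc_of_parse_tdm().
 */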
1290 static struct platform_driver ucc_hdlc_driver = {
1291         .probe  = ucc_hdlc_probe,
1292         .remove = ucc_hdlc_remove,
1293         .driver = {
1294                 .name           = DRV_NAME,
1295                 .pm             = HDLC_PM_OPS,
1296                 .of_match_table = fsl_ucc_hdlc_of_match,
1297         },
1298 };
1299 
1300 module_platform_driver(ucc_hdlc_driver);
1301 MODULE_LICENSE("GPL");
1302 MODULE_DESCRIPTION(DRV_DESC);
