1/********************************************************************* 2 * 3 * vlsi_ir.c: VLSI82C147 PCI IrDA controller driver for Linux 4 * 5 * Copyright (c) 2001-2003 Martin Diehl 6 * 7 * This program is free software; you can redistribute it and/or 8 * modify it under the terms of the GNU General Public License as 9 * published by the Free Software Foundation; either version 2 of 10 * the License, or (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, see <http://www.gnu.org/licenses/>. 19 * 20 ********************************************************************/ 21 22#include <linux/module.h> 23 24#define DRIVER_NAME "vlsi_ir" 25#define DRIVER_VERSION "v0.5" 26#define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147" 27#define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>" 28 29MODULE_DESCRIPTION(DRIVER_DESCRIPTION); 30MODULE_AUTHOR(DRIVER_AUTHOR); 31MODULE_LICENSE("GPL"); 32 33/********************************************************/ 34 35#include <linux/kernel.h> 36#include <linux/ktime.h> 37#include <linux/init.h> 38#include <linux/interrupt.h> 39#include <linux/pci.h> 40#include <linux/slab.h> 41#include <linux/netdevice.h> 42#include <linux/skbuff.h> 43#include <linux/delay.h> 44#include <linux/proc_fs.h> 45#include <linux/seq_file.h> 46#include <linux/math64.h> 47#include <linux/mutex.h> 48#include <asm/uaccess.h> 49#include <asm/byteorder.h> 50 51#include <net/irda/irda.h> 52#include <net/irda/irda_device.h> 53#include <net/irda/wrapper.h> 54#include <net/irda/crc.h> 55 56#include "vlsi_ir.h" 57 58/********************************************************/ 59 60static /* const */ char drivername[] = DRIVER_NAME; 61 62static const struct pci_device_id vlsi_irda_table[] = { 63 { 64 .class = PCI_CLASS_WIRELESS_IRDA << 8, 65 .class_mask = PCI_CLASS_SUBCLASS_MASK << 8, 66 .vendor = PCI_VENDOR_ID_VLSI, 67 .device = PCI_DEVICE_ID_VLSI_82C147, 68 .subvendor = PCI_ANY_ID, 69 .subdevice = PCI_ANY_ID, 70 }, 71 { /* all zeroes */ } 72}; 73 74MODULE_DEVICE_TABLE(pci, vlsi_irda_table); 75 76/********************************************************/ 77 78/* clksrc: which clock source to be used 79 * 0: auto - try PLL, fallback to 40MHz XCLK 80 * 1: on-chip 48MHz PLL 81 * 2: external 48MHz XCLK 82 * 3: external 40MHz XCLK (HP OB-800) 83 */ 84 85static int clksrc = 0; /* default is 0(auto) */ 86module_param(clksrc, int, 0); 87MODULE_PARM_DESC(clksrc, "clock input source selection"); 88 89/* ringsize: size of the tx and rx descriptor rings 90 * independent for tx and rx 91 * specify as ringsize=tx[,rx] 92 * allowed values: 4, 8, 16, 32, 64 93 * Due to the IrDA 1.x max. allowed window size=7, 94 * there should be no gain when using rings larger than 8 95 */ 96 97static int ringsize[] = {8,8}; /* default is tx=8 / rx=8 */ 98module_param_array(ringsize, int, NULL, 0); 99MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size"); 100 101/* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits 102 * 0: very short, 1.5us (exception: 6us at 2.4 kbaud) 103 * 1: nominal 3/16 bittime width 104 * note: IrDA compliant peer devices should be happy regardless 105 * which one is used. Primary goal is to save some power 106 * on the sender's side - at 9.6kbaud for example the short 107 * pulse width saves more than 90% of the transmitted IR power. 108 */ 109 110static int sirpulse = 1; /* default is 3/16 bittime */ 111module_param(sirpulse, int, 0); 112MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning"); 113 114/* qos_mtt_bits: encoded min-turn-time value we require the peer device 115 * to use before transmitting to us. "Type 1" (per-station) 116 * bitfield according to IrLAP definition (section 6.6.8) 117 * Don't know which transceiver is used by my OB800 - the 118 * pretty common HP HDLS-1100 requires 1 msec - so lets use this. 119 */ 120 121static int qos_mtt_bits = 0x07; /* default is 1 ms or more */ 122module_param(qos_mtt_bits, int, 0); 123MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time"); 124 125/********************************************************/ 126 127static void vlsi_reg_debug(unsigned iobase, const char *s) 128{ 129 int i; 130 131 printk(KERN_DEBUG "%s: ", s); 132 for (i = 0; i < 0x20; i++) 133 printk("%02x", (unsigned)inb((iobase+i))); 134 printk("\n"); 135} 136 137static void vlsi_ring_debug(struct vlsi_ring *r) 138{ 139 struct ring_descr *rd; 140 unsigned i; 141 142 printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 143 __func__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw); 144 printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __func__, 145 atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask); 146 for (i = 0; i < r->size; i++) { 147 rd = &r->rd[i]; 148 printk(KERN_DEBUG "%s - ring descr %u: ", __func__, i); 149 printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 150 printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n", 151 __func__, (unsigned) rd_get_status(rd), 152 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 153 } 154} 155 156/********************************************************/ 157 158/* needed regardless of CONFIG_PROC_FS */ 159static struct proc_dir_entry *vlsi_proc_root = NULL; 160 161#ifdef CONFIG_PROC_FS 162 163static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev) 164{ 165 unsigned iobase = pci_resource_start(pdev, 0); 166 unsigned i; 167 168 seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n", 169 pci_name(pdev), (int)pdev->vendor, (int)pdev->device); 170 seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state); 171 seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n", 172 pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask); 173 seq_printf(seq, "hw registers: "); 174 for (i = 0; i < 0x20; i++) 175 seq_printf(seq, "%02x", (unsigned)inb((iobase+i))); 176 seq_printf(seq, "\n"); 177} 178 179static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev) 180{ 181 vlsi_irda_dev_t *idev = netdev_priv(ndev); 182 u8 byte; 183 u16 word; 184 s32 sec, usec; 185 unsigned iobase = ndev->base_addr; 186 187 seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name, 188 netif_device_present(ndev) ? "attached" : "detached", 189 netif_running(ndev) ? "running" : "not running", 190 netif_carrier_ok(ndev) ? "carrier ok" : "no carrier", 191 netif_queue_stopped(ndev) ? "queue stopped" : "queue running"); 192 193 if (!netif_running(ndev)) 194 return; 195 196 seq_printf(seq, "\nhw-state:\n"); 197 pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte); 198 seq_printf(seq, "IRMISC:%s%s%s uart%s", 199 (byte&IRMISC_IRRAIL) ? " irrail" : "", 200 (byte&IRMISC_IRPD) ? " irpd" : "", 201 (byte&IRMISC_UARTTST) ? " uarttest" : "", 202 (byte&IRMISC_UARTEN) ? "@" : " disabled\n"); 203 if (byte&IRMISC_UARTEN) { 204 seq_printf(seq, "0x%s\n", 205 (byte&2) ? ((byte&1) ? "3e8" : "2e8") 206 : ((byte&1) ? "3f8" : "2f8")); 207 } 208 pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte); 209 seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n", 210 (byte&CLKCTL_PD_INV) ? "powered" : "down", 211 (byte&CLKCTL_LOCK) ? " locked" : "", 212 (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "", 213 (byte&CLKCTL_CLKSTP) ? "stopped" : "running", 214 (byte&CLKCTL_WAKE) ? "enabled" : "disabled"); 215 pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte); 216 seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte); 217 218 byte = inb(iobase+VLSI_PIO_IRINTR); 219 seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n", 220 (byte&IRINTR_ACTEN) ? " ACTEN" : "", 221 (byte&IRINTR_RPKTEN) ? " RPKTEN" : "", 222 (byte&IRINTR_TPKTEN) ? " TPKTEN" : "", 223 (byte&IRINTR_OE_EN) ? " OE_EN" : "", 224 (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "", 225 (byte&IRINTR_RPKTINT) ? " RPKTINT" : "", 226 (byte&IRINTR_TPKTINT) ? " TPKTINT" : "", 227 (byte&IRINTR_OE_INT) ? " OE_INT" : ""); 228 word = inw(iobase+VLSI_PIO_RINGPTR); 229 seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word)); 230 word = inw(iobase+VLSI_PIO_RINGBASE); 231 seq_printf(seq, "RINGBASE: busmap=0x%08x\n", 232 ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24)); 233 word = inw(iobase+VLSI_PIO_RINGSIZE); 234 seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word), 235 RINGSIZE_TO_TXSIZE(word)); 236 237 word = inw(iobase+VLSI_PIO_IRCFG); 238 seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n", 239 (word&IRCFG_LOOP) ? " LOOP" : "", 240 (word&IRCFG_ENTX) ? " ENTX" : "", 241 (word&IRCFG_ENRX) ? " ENRX" : "", 242 (word&IRCFG_MSTR) ? " MSTR" : "", 243 (word&IRCFG_RXANY) ? " RXANY" : "", 244 (word&IRCFG_CRC16) ? " CRC16" : "", 245 (word&IRCFG_FIR) ? " FIR" : "", 246 (word&IRCFG_MIR) ? " MIR" : "", 247 (word&IRCFG_SIR) ? " SIR" : "", 248 (word&IRCFG_SIRFILT) ? " SIRFILT" : "", 249 (word&IRCFG_SIRTEST) ? " SIRTEST" : "", 250 (word&IRCFG_TXPOL) ? " TXPOL" : "", 251 (word&IRCFG_RXPOL) ? " RXPOL" : ""); 252 word = inw(iobase+VLSI_PIO_IRENABLE); 253 seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n", 254 (word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "", 255 (word&IRENABLE_CFGER) ? " CFGERR" : "", 256 (word&IRENABLE_FIR_ON) ? " FIR_ON" : "", 257 (word&IRENABLE_MIR_ON) ? " MIR_ON" : "", 258 (word&IRENABLE_SIR_ON) ? " SIR_ON" : "", 259 (word&IRENABLE_ENTXST) ? " ENTXST" : "", 260 (word&IRENABLE_ENRXST) ? " ENRXST" : "", 261 (word&IRENABLE_CRC16_ON) ? " CRC16_ON" : ""); 262 word = inw(iobase+VLSI_PIO_PHYCTL); 263 seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 264 (unsigned)PHYCTL_TO_BAUD(word), 265 (unsigned)PHYCTL_TO_PLSWID(word), 266 (unsigned)PHYCTL_TO_PREAMB(word)); 267 word = inw(iobase+VLSI_PIO_NPHYCTL); 268 seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n", 269 (unsigned)PHYCTL_TO_BAUD(word), 270 (unsigned)PHYCTL_TO_PLSWID(word), 271 (unsigned)PHYCTL_TO_PREAMB(word)); 272 word = inw(iobase+VLSI_PIO_MAXPKT); 273 seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word); 274 word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 275 seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word); 276 277 seq_printf(seq, "\nsw-state:\n"); 278 seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud, 279 (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR")); 280 sec = div_s64_rem(ktime_us_delta(ktime_get(), idev->last_rx), 281 USEC_PER_SEC, &usec); 282 seq_printf(seq, "last rx: %ul.%06u sec\n", sec, usec); 283 284 seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu", 285 ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors, 286 ndev->stats.rx_dropped); 287 seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n", 288 ndev->stats.rx_over_errors, ndev->stats.rx_length_errors, 289 ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors); 290 seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n", 291 ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors, 292 ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors); 293 294} 295 296static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r) 297{ 298 struct ring_descr *rd; 299 unsigned i, j; 300 int h, t; 301 302 seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n", 303 r->size, r->mask, r->len, r->dir, r->rd[0].hw); 304 h = atomic_read(&r->head) & r->mask; 305 t = atomic_read(&r->tail) & r->mask; 306 seq_printf(seq, "head = %d / tail = %d ", h, t); 307 if (h == t) 308 seq_printf(seq, "(empty)\n"); 309 else { 310 if (((t+1)&r->mask) == h) 311 seq_printf(seq, "(full)\n"); 312 else 313 seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask)); 314 rd = &r->rd[h]; 315 j = (unsigned) rd_get_count(rd); 316 seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n", 317 h, (unsigned)rd_get_status(rd), j); 318 if (j > 0) { 319 seq_printf(seq, " data: %*ph\n", 320 min_t(unsigned, j, 20), rd->buf); 321 } 322 } 323 for (i = 0; i < r->size; i++) { 324 rd = &r->rd[i]; 325 seq_printf(seq, "> ring descr %u: ", i); 326 seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw); 327 seq_printf(seq, " hw: status=%02x count=%u busaddr=0x%08x\n", 328 (unsigned) rd_get_status(rd), 329 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd)); 330 } 331} 332 333static int vlsi_seq_show(struct seq_file *seq, void *v) 334{ 335 struct net_device *ndev = seq->private; 336 vlsi_irda_dev_t *idev = netdev_priv(ndev); 337 unsigned long flags; 338 339 seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION); 340 seq_printf(seq, "clksrc: %s\n", 341 (clksrc>=2) ? ((clksrc==3)?"40MHz XCLK":"48MHz XCLK") 342 : ((clksrc==1)?"48MHz PLL":"autodetect")); 343 seq_printf(seq, "ringsize: tx=%d / rx=%d\n", 344 ringsize[0], ringsize[1]); 345 seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short"); 346 seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits); 347 348 spin_lock_irqsave(&idev->lock, flags); 349 if (idev->pdev != NULL) { 350 vlsi_proc_pdev(seq, idev->pdev); 351 352 if (idev->pdev->current_state == 0) 353 vlsi_proc_ndev(seq, ndev); 354 else 355 seq_printf(seq, "\nPCI controller down - resume_ok = %d\n", 356 idev->resume_ok); 357 if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) { 358 seq_printf(seq, "\n--------- RX ring -----------\n\n"); 359 vlsi_proc_ring(seq, idev->rx_ring); 360 seq_printf(seq, "\n--------- TX ring -----------\n\n"); 361 vlsi_proc_ring(seq, idev->tx_ring); 362 } 363 } 364 seq_printf(seq, "\n"); 365 spin_unlock_irqrestore(&idev->lock, flags); 366 367 return 0; 368} 369 370static int vlsi_seq_open(struct inode *inode, struct file *file) 371{ 372 return single_open(file, vlsi_seq_show, PDE_DATA(inode)); 373} 374 375static const struct file_operations vlsi_proc_fops = { 376 .owner = THIS_MODULE, 377 .open = vlsi_seq_open, 378 .read = seq_read, 379 .llseek = seq_lseek, 380 .release = single_release, 381}; 382 383#define VLSI_PROC_FOPS (&vlsi_proc_fops) 384 385#else /* CONFIG_PROC_FS */ 386#define VLSI_PROC_FOPS NULL 387#endif 388 389/********************************************************/ 390 391static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap, 392 unsigned size, unsigned len, int dir) 393{ 394 struct vlsi_ring *r; 395 struct ring_descr *rd; 396 unsigned i, j; 397 dma_addr_t busaddr; 398 399 if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */ 400 return NULL; 401 402 r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL); 403 if (!r) 404 return NULL; 405 memset(r, 0, sizeof(*r)); 406 407 r->pdev = pdev; 408 r->dir = dir; 409 r->len = len; 410 r->rd = (struct ring_descr *)(r+1); 411 r->mask = size - 1; 412 r->size = size; 413 atomic_set(&r->head, 0); 414 atomic_set(&r->tail, 0); 415 416 for (i = 0; i < size; i++) { 417 rd = r->rd + i; 418 memset(rd, 0, sizeof(*rd)); 419 rd->hw = hwmap + i; 420 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA); 421 if (rd->buf == NULL || 422 !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) { 423 if (rd->buf) { 424 net_err_ratelimited("%s: failed to create PCI-MAP for %p\n", 425 __func__, rd->buf); 426 kfree(rd->buf); 427 rd->buf = NULL; 428 } 429 for (j = 0; j < i; j++) { 430 rd = r->rd + j; 431 busaddr = rd_get_addr(rd); 432 rd_set_addr_status(rd, 0, 0); 433 if (busaddr) 434 pci_unmap_single(pdev, busaddr, len, dir); 435 kfree(rd->buf); 436 rd->buf = NULL; 437 } 438 kfree(r); 439 return NULL; 440 } 441 rd_set_addr_status(rd, busaddr, 0); 442 /* initially, the dma buffer is owned by the CPU */ 443 rd->skb = NULL; 444 } 445 return r; 446} 447 448static int vlsi_free_ring(struct vlsi_ring *r) 449{ 450 struct ring_descr *rd; 451 unsigned i; 452 dma_addr_t busaddr; 453 454 for (i = 0; i < r->size; i++) { 455 rd = r->rd + i; 456 if (rd->skb) 457 dev_kfree_skb_any(rd->skb); 458 busaddr = rd_get_addr(rd); 459 rd_set_addr_status(rd, 0, 0); 460 if (busaddr) 461 pci_unmap_single(r->pdev, busaddr, r->len, r->dir); 462 kfree(rd->buf); 463 } 464 kfree(r); 465 return 0; 466} 467 468static int vlsi_create_hwif(vlsi_irda_dev_t *idev) 469{ 470 char *ringarea; 471 struct ring_descr_hw *hwmap; 472 473 idev->virtaddr = NULL; 474 idev->busaddr = 0; 475 476 ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE, 477 &idev->busaddr); 478 if (!ringarea) 479 goto out; 480 481 hwmap = (struct ring_descr_hw *)ringarea; 482 idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1], 483 XFER_BUF_SIZE, PCI_DMA_FROMDEVICE); 484 if (idev->rx_ring == NULL) 485 goto out_unmap; 486 487 hwmap += MAX_RING_DESCR; 488 idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0], 489 XFER_BUF_SIZE, PCI_DMA_TODEVICE); 490 if (idev->tx_ring == NULL) 491 goto out_free_rx; 492 493 idev->virtaddr = ringarea; 494 return 0; 495 496out_free_rx: 497 vlsi_free_ring(idev->rx_ring); 498out_unmap: 499 idev->rx_ring = idev->tx_ring = NULL; 500 pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr); 501 idev->busaddr = 0; 502out: 503 return -ENOMEM; 504} 505 506static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev) 507{ 508 vlsi_free_ring(idev->rx_ring); 509 vlsi_free_ring(idev->tx_ring); 510 idev->rx_ring = idev->tx_ring = NULL; 511 512 if (idev->busaddr) 513 pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr); 514 515 idev->virtaddr = NULL; 516 idev->busaddr = 0; 517 518 return 0; 519} 520 521/********************************************************/ 522 523static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd) 524{ 525 u16 status; 526 int crclen, len = 0; 527 struct sk_buff *skb; 528 int ret = 0; 529 struct net_device *ndev = pci_get_drvdata(r->pdev); 530 vlsi_irda_dev_t *idev = netdev_priv(ndev); 531 532 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 533 /* dma buffer now owned by the CPU */ 534 status = rd_get_status(rd); 535 if (status & RD_RX_ERROR) { 536 if (status & RD_RX_OVER) 537 ret |= VLSI_RX_OVER; 538 if (status & RD_RX_LENGTH) 539 ret |= VLSI_RX_LENGTH; 540 if (status & RD_RX_PHYERR) 541 ret |= VLSI_RX_FRAME; 542 if (status & RD_RX_CRCERR) 543 ret |= VLSI_RX_CRC; 544 goto done; 545 } 546 547 len = rd_get_count(rd); 548 crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16); 549 len -= crclen; /* remove trailing CRC */ 550 if (len <= 0) { 551 pr_debug("%s: strange frame (len=%d)\n", __func__, len); 552 ret |= VLSI_RX_DROP; 553 goto done; 554 } 555 556 if (idev->mode == IFF_SIR) { /* hw checks CRC in MIR, FIR mode */ 557 558 /* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the 559 * endian-adjustment there just in place will dirty a cache line 560 * which belongs to the map and thus we must be sure it will 561 * get flushed before giving the buffer back to hardware. 562 * vlsi_fill_rx() will do this anyway - but here we rely on. 563 */ 564 le16_to_cpus(rd->buf+len); 565 if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) { 566 pr_debug("%s: crc error\n", __func__); 567 ret |= VLSI_RX_CRC; 568 goto done; 569 } 570 } 571 572 if (!rd->skb) { 573 net_warn_ratelimited("%s: rx packet lost\n", __func__); 574 ret |= VLSI_RX_DROP; 575 goto done; 576 } 577 578 skb = rd->skb; 579 rd->skb = NULL; 580 skb->dev = ndev; 581 memcpy(skb_put(skb,len), rd->buf, len); 582 skb_reset_mac_header(skb); 583 if (in_interrupt()) 584 netif_rx(skb); 585 else 586 netif_rx_ni(skb); 587 588done: 589 rd_set_status(rd, 0); 590 rd_set_count(rd, 0); 591 /* buffer still owned by CPU */ 592 593 return (ret) ? -ret : len; 594} 595 596static void vlsi_fill_rx(struct vlsi_ring *r) 597{ 598 struct ring_descr *rd; 599 600 for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) { 601 if (rd_is_active(rd)) { 602 net_warn_ratelimited("%s: driver bug: rx descr race with hw\n", 603 __func__); 604 vlsi_ring_debug(r); 605 break; 606 } 607 if (!rd->skb) { 608 rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE); 609 if (rd->skb) { 610 skb_reserve(rd->skb,1); 611 rd->skb->protocol = htons(ETH_P_IRDA); 612 } 613 else 614 break; /* probably not worth logging? */ 615 } 616 /* give dma buffer back to busmaster */ 617 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir); 618 rd_activate(rd); 619 } 620} 621 622static void vlsi_rx_interrupt(struct net_device *ndev) 623{ 624 vlsi_irda_dev_t *idev = netdev_priv(ndev); 625 struct vlsi_ring *r = idev->rx_ring; 626 struct ring_descr *rd; 627 int ret; 628 629 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 630 631 if (rd_is_active(rd)) 632 break; 633 634 ret = vlsi_process_rx(r, rd); 635 636 if (ret < 0) { 637 ret = -ret; 638 ndev->stats.rx_errors++; 639 if (ret & VLSI_RX_DROP) 640 ndev->stats.rx_dropped++; 641 if (ret & VLSI_RX_OVER) 642 ndev->stats.rx_over_errors++; 643 if (ret & VLSI_RX_LENGTH) 644 ndev->stats.rx_length_errors++; 645 if (ret & VLSI_RX_FRAME) 646 ndev->stats.rx_frame_errors++; 647 if (ret & VLSI_RX_CRC) 648 ndev->stats.rx_crc_errors++; 649 } 650 else if (ret > 0) { 651 ndev->stats.rx_packets++; 652 ndev->stats.rx_bytes += ret; 653 } 654 } 655 656 idev->last_rx = ktime_get(); /* remember "now" for later mtt delay */ 657 658 vlsi_fill_rx(r); 659 660 if (ring_first(r) == NULL) { 661 /* we are in big trouble, if this should ever happen */ 662 net_err_ratelimited("%s: rx ring exhausted!\n", __func__); 663 vlsi_ring_debug(r); 664 } 665 else 666 outw(0, ndev->base_addr+VLSI_PIO_PROMPT); 667} 668 669/* caller must have stopped the controller from busmastering */ 670 671static void vlsi_unarm_rx(vlsi_irda_dev_t *idev) 672{ 673 struct net_device *ndev = pci_get_drvdata(idev->pdev); 674 struct vlsi_ring *r = idev->rx_ring; 675 struct ring_descr *rd; 676 int ret; 677 678 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 679 680 ret = 0; 681 if (rd_is_active(rd)) { 682 rd_set_status(rd, 0); 683 if (rd_get_count(rd)) { 684 pr_debug("%s - dropping rx packet\n", __func__); 685 ret = -VLSI_RX_DROP; 686 } 687 rd_set_count(rd, 0); 688 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 689 if (rd->skb) { 690 dev_kfree_skb_any(rd->skb); 691 rd->skb = NULL; 692 } 693 } 694 else 695 ret = vlsi_process_rx(r, rd); 696 697 if (ret < 0) { 698 ret = -ret; 699 ndev->stats.rx_errors++; 700 if (ret & VLSI_RX_DROP) 701 ndev->stats.rx_dropped++; 702 if (ret & VLSI_RX_OVER) 703 ndev->stats.rx_over_errors++; 704 if (ret & VLSI_RX_LENGTH) 705 ndev->stats.rx_length_errors++; 706 if (ret & VLSI_RX_FRAME) 707 ndev->stats.rx_frame_errors++; 708 if (ret & VLSI_RX_CRC) 709 ndev->stats.rx_crc_errors++; 710 } 711 else if (ret > 0) { 712 ndev->stats.rx_packets++; 713 ndev->stats.rx_bytes += ret; 714 } 715 } 716} 717 718/********************************************************/ 719 720static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd) 721{ 722 u16 status; 723 int len; 724 int ret; 725 726 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 727 /* dma buffer now owned by the CPU */ 728 status = rd_get_status(rd); 729 if (status & RD_TX_UNDRN) 730 ret = VLSI_TX_FIFO; 731 else 732 ret = 0; 733 rd_set_status(rd, 0); 734 735 if (rd->skb) { 736 len = rd->skb->len; 737 dev_kfree_skb_any(rd->skb); 738 rd->skb = NULL; 739 } 740 else /* tx-skb already freed? - should never happen */ 741 len = rd_get_count(rd); /* incorrect for SIR! (due to wrapping) */ 742 743 rd_set_count(rd, 0); 744 /* dma buffer still owned by the CPU */ 745 746 return (ret) ? -ret : len; 747} 748 749static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase) 750{ 751 u16 nphyctl; 752 u16 config; 753 unsigned mode; 754 int ret; 755 int baudrate; 756 int fifocnt; 757 758 baudrate = idev->new_baud; 759 pr_debug("%s: %d -> %d\n", __func__, idev->baud, idev->new_baud); 760 if (baudrate == 4000000) { 761 mode = IFF_FIR; 762 config = IRCFG_FIR; 763 nphyctl = PHYCTL_FIR; 764 } 765 else if (baudrate == 1152000) { 766 mode = IFF_MIR; 767 config = IRCFG_MIR | IRCFG_CRC16; 768 nphyctl = PHYCTL_MIR(clksrc==3); 769 } 770 else { 771 mode = IFF_SIR; 772 config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY; 773 switch(baudrate) { 774 default: 775 net_warn_ratelimited("%s: undefined baudrate %d - fallback to 9600!\n", 776 __func__, baudrate); 777 baudrate = 9600; 778 /* fallthru */ 779 case 2400: 780 case 9600: 781 case 19200: 782 case 38400: 783 case 57600: 784 case 115200: 785 nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3); 786 break; 787 } 788 } 789 config |= IRCFG_MSTR | IRCFG_ENRX; 790 791 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 792 if (fifocnt != 0) { 793 pr_debug("%s: rx fifo not empty(%d)\n", __func__, fifocnt); 794 } 795 796 outw(0, iobase+VLSI_PIO_IRENABLE); 797 outw(config, iobase+VLSI_PIO_IRCFG); 798 outw(nphyctl, iobase+VLSI_PIO_NPHYCTL); 799 wmb(); 800 outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE); 801 mb(); 802 803 udelay(1); /* chip applies IRCFG on next rising edge of its 8MHz clock */ 804 805 /* read back settings for validation */ 806 807 config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK; 808 809 if (mode == IFF_FIR) 810 config ^= IRENABLE_FIR_ON; 811 else if (mode == IFF_MIR) 812 config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON); 813 else 814 config ^= IRENABLE_SIR_ON; 815 816 if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) { 817 net_warn_ratelimited("%s: failed to set %s mode!\n", 818 __func__, 819 mode == IFF_SIR ? "SIR" : 820 mode == IFF_MIR ? "MIR" : "FIR"); 821 ret = -1; 822 } 823 else { 824 if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) { 825 net_warn_ratelimited("%s: failed to apply baudrate %d\n", 826 __func__, baudrate); 827 ret = -1; 828 } 829 else { 830 idev->mode = mode; 831 idev->baud = baudrate; 832 idev->new_baud = 0; 833 ret = 0; 834 } 835 } 836 837 if (ret) 838 vlsi_reg_debug(iobase,__func__); 839 840 return ret; 841} 842 843static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb, 844 struct net_device *ndev) 845{ 846 vlsi_irda_dev_t *idev = netdev_priv(ndev); 847 struct vlsi_ring *r = idev->tx_ring; 848 struct ring_descr *rd; 849 unsigned long flags; 850 unsigned iobase = ndev->base_addr; 851 u8 status; 852 u16 config; 853 int mtt, diff; 854 int len, speed; 855 char *msg = NULL; 856 857 speed = irda_get_next_speed(skb); 858 spin_lock_irqsave(&idev->lock, flags); 859 if (speed != -1 && speed != idev->baud) { 860 netif_stop_queue(ndev); 861 idev->new_baud = speed; 862 status = RD_TX_CLRENTX; /* stop tx-ring after this frame */ 863 } 864 else 865 status = 0; 866 867 if (skb->len == 0) { 868 /* handle zero packets - should be speed change */ 869 if (status == 0) { 870 msg = "bogus zero-length packet"; 871 goto drop_unlock; 872 } 873 874 /* due to the completely asynch tx operation we might have 875 * IrLAP racing with the hardware here, f.e. if the controller 876 * is just sending the last packet with current speed while 877 * the LAP is already switching the speed using synchronous 878 * len=0 packet. Immediate execution would lead to hw lockup 879 * requiring a powercycle to reset. Good candidate to trigger 880 * this is the final UA:RSP packet after receiving a DISC:CMD 881 * when getting the LAP down. 882 * Note that we are not protected by the queue_stop approach 883 * because the final UA:RSP arrives _without_ request to apply 884 * new-speed-after-this-packet - hence the driver doesn't know 885 * this was the last packet and doesn't stop the queue. So the 886 * forced switch to default speed from LAP gets through as fast 887 * as only some 10 usec later while the UA:RSP is still processed 888 * by the hardware and we would get screwed. 889 */ 890 891 if (ring_first(idev->tx_ring) == NULL) { 892 /* no race - tx-ring already empty */ 893 vlsi_set_baud(idev, iobase); 894 netif_wake_queue(ndev); 895 } 896 else 897 ; 898 /* keep the speed change pending like it would 899 * for any len>0 packet. tx completion interrupt 900 * will apply it when the tx ring becomes empty. 901 */ 902 spin_unlock_irqrestore(&idev->lock, flags); 903 dev_kfree_skb_any(skb); 904 return NETDEV_TX_OK; 905 } 906 907 /* sanity checks - simply drop the packet */ 908 909 rd = ring_last(r); 910 if (!rd) { 911 msg = "ring full, but queue wasn't stopped"; 912 goto drop_unlock; 913 } 914 915 if (rd_is_active(rd)) { 916 msg = "entry still owned by hw"; 917 goto drop_unlock; 918 } 919 920 if (!rd->buf) { 921 msg = "tx ring entry without pci buffer"; 922 goto drop_unlock; 923 } 924 925 if (rd->skb) { 926 msg = "ring entry with old skb still attached"; 927 goto drop_unlock; 928 } 929 930 /* no need for serialization or interrupt disable during mtt */ 931 spin_unlock_irqrestore(&idev->lock, flags); 932 933 if ((mtt = irda_get_mtt(skb)) > 0) { 934 diff = ktime_us_delta(ktime_get(), idev->last_rx); 935 if (mtt > diff) 936 udelay(mtt - diff); 937 /* must not sleep here - called under netif_tx_lock! */ 938 } 939 940 /* tx buffer already owned by CPU due to pci_dma_sync_single_for_cpu() 941 * after subsequent tx-completion 942 */ 943 944 if (idev->mode == IFF_SIR) { 945 status |= RD_TX_DISCRC; /* no hw-crc creation */ 946 len = async_wrap_skb(skb, rd->buf, r->len); 947 948 /* Some rare worst case situation in SIR mode might lead to 949 * potential buffer overflow. The wrapper detects this, returns 950 * with a shortened frame (without FCS/EOF) but doesn't provide 951 * any error indication about the invalid packet which we are 952 * going to transmit. 953 * Therefore we log if the buffer got filled to the point, where the 954 * wrapper would abort, i.e. when there are less than 5 bytes left to 955 * allow appending the FCS/EOF. 956 */ 957 958 if (len >= r->len-5) 959 net_warn_ratelimited("%s: possible buffer overflow with SIR wrapping!\n", 960 __func__); 961 } 962 else { 963 /* hw deals with MIR/FIR mode wrapping */ 964 status |= RD_TX_PULSE; /* send 2 us highspeed indication pulse */ 965 len = skb->len; 966 if (len > r->len) { 967 msg = "frame exceeds tx buffer length"; 968 goto drop; 969 } 970 else 971 skb_copy_from_linear_data(skb, rd->buf, len); 972 } 973 974 rd->skb = skb; /* remember skb for tx-complete stats */ 975 976 rd_set_count(rd, len); 977 rd_set_status(rd, status); /* not yet active! */ 978 979 /* give dma buffer back to busmaster-hw (flush caches to make 980 * CPU-driven changes visible from the pci bus). 981 */ 982 983 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir); 984 985/* Switching to TX mode here races with the controller 986 * which may stop TX at any time when fetching an inactive descriptor 987 * or one with CLR_ENTX set. So we switch on TX only, if TX was not running 988 * _after_ the new descriptor was activated on the ring. This ensures 989 * we will either find TX already stopped or we can be sure, there 990 * will be a TX-complete interrupt even if the chip stopped doing 991 * TX just after we found it still running. The ISR will then find 992 * the non-empty ring and restart TX processing. The enclosing 993 * spinlock provides the correct serialization to prevent race with isr. 994 */ 995 996 spin_lock_irqsave(&idev->lock,flags); 997 998 rd_activate(rd); 999 1000 if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) { 1001 int fifocnt; 1002 1003 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1004 if (fifocnt != 0) { 1005 pr_debug("%s: rx fifo not empty(%d)\n", 1006 __func__, fifocnt); 1007 } 1008 1009 config = inw(iobase+VLSI_PIO_IRCFG); 1010 mb(); 1011 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1012 wmb(); 1013 outw(0, iobase+VLSI_PIO_PROMPT); 1014 } 1015 1016 if (ring_put(r) == NULL) { 1017 netif_stop_queue(ndev); 1018 pr_debug("%s: tx ring full - queue stopped\n", __func__); 1019 } 1020 spin_unlock_irqrestore(&idev->lock, flags); 1021 1022 return NETDEV_TX_OK; 1023 1024drop_unlock: 1025 spin_unlock_irqrestore(&idev->lock, flags); 1026drop: 1027 net_warn_ratelimited("%s: dropping packet - %s\n", __func__, msg); 1028 dev_kfree_skb_any(skb); 1029 ndev->stats.tx_errors++; 1030 ndev->stats.tx_dropped++; 1031 /* Don't even think about returning NET_XMIT_DROP (=1) here! 1032 * In fact any retval!=0 causes the packet scheduler to requeue the 1033 * packet for later retry of transmission - which isn't exactly 1034 * what we want after we've just called dev_kfree_skb_any ;-) 1035 */ 1036 return NETDEV_TX_OK; 1037} 1038 1039static void vlsi_tx_interrupt(struct net_device *ndev) 1040{ 1041 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1042 struct vlsi_ring *r = idev->tx_ring; 1043 struct ring_descr *rd; 1044 unsigned iobase; 1045 int ret; 1046 u16 config; 1047 1048 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 1049 1050 if (rd_is_active(rd)) 1051 break; 1052 1053 ret = vlsi_process_tx(r, rd); 1054 1055 if (ret < 0) { 1056 ret = -ret; 1057 ndev->stats.tx_errors++; 1058 if (ret & VLSI_TX_DROP) 1059 ndev->stats.tx_dropped++; 1060 if (ret & VLSI_TX_FIFO) 1061 ndev->stats.tx_fifo_errors++; 1062 } 1063 else if (ret > 0){ 1064 ndev->stats.tx_packets++; 1065 ndev->stats.tx_bytes += ret; 1066 } 1067 } 1068 1069 iobase = ndev->base_addr; 1070 1071 if (idev->new_baud && rd == NULL) /* tx ring empty and speed change pending */ 1072 vlsi_set_baud(idev, iobase); 1073 1074 config = inw(iobase+VLSI_PIO_IRCFG); 1075 if (rd == NULL) /* tx ring empty: re-enable rx */ 1076 outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG); 1077 1078 else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) { 1079 int fifocnt; 1080 1081 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1082 if (fifocnt != 0) { 1083 pr_debug("%s: rx fifo not empty(%d)\n", 1084 __func__, fifocnt); 1085 } 1086 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG); 1087 } 1088 1089 outw(0, iobase+VLSI_PIO_PROMPT); 1090 1091 if (netif_queue_stopped(ndev) && !idev->new_baud) { 1092 netif_wake_queue(ndev); 1093 pr_debug("%s: queue awoken\n", __func__); 1094 } 1095} 1096 1097/* caller must have stopped the controller from busmastering */ 1098 1099static void vlsi_unarm_tx(vlsi_irda_dev_t *idev) 1100{ 1101 struct net_device *ndev = pci_get_drvdata(idev->pdev); 1102 struct vlsi_ring *r = idev->tx_ring; 1103 struct ring_descr *rd; 1104 int ret; 1105 1106 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) { 1107 1108 ret = 0; 1109 if (rd_is_active(rd)) { 1110 rd_set_status(rd, 0); 1111 rd_set_count(rd, 0); 1112 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 1113 if (rd->skb) { 1114 dev_kfree_skb_any(rd->skb); 1115 rd->skb = NULL; 1116 } 1117 pr_debug("%s - dropping tx packet\n", __func__); 1118 ret = -VLSI_TX_DROP; 1119 } 1120 else 1121 ret = vlsi_process_tx(r, rd); 1122 1123 if (ret < 0) { 1124 ret = -ret; 1125 ndev->stats.tx_errors++; 1126 if (ret & VLSI_TX_DROP) 1127 ndev->stats.tx_dropped++; 1128 if (ret & VLSI_TX_FIFO) 1129 ndev->stats.tx_fifo_errors++; 1130 } 1131 else if (ret > 0){ 1132 ndev->stats.tx_packets++; 1133 ndev->stats.tx_bytes += ret; 1134 } 1135 } 1136 1137} 1138 1139/********************************************************/ 1140 1141static int vlsi_start_clock(struct pci_dev *pdev) 1142{ 1143 u8 clkctl, lock; 1144 int i, count; 1145 1146 if (clksrc < 2) { /* auto or PLL: try PLL */ 1147 clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP; 1148 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1149 1150 /* procedure to detect PLL lock synchronisation: 1151 * after 0.5 msec initial delay we expect to find 3 PLL lock 1152 * indications within 10 msec for successful PLL detection. 1153 */ 1154 udelay(500); 1155 count = 0; 1156 for (i = 500; i <= 10000; i += 50) { /* max 10 msec */ 1157 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock); 1158 if (lock&CLKCTL_LOCK) { 1159 if (++count >= 3) 1160 break; 1161 } 1162 udelay(50); 1163 } 1164 if (count < 3) { 1165 if (clksrc == 1) { /* explicitly asked for PLL hence bail out */ 1166 net_err_ratelimited("%s: no PLL or failed to lock!\n", 1167 __func__); 1168 clkctl = CLKCTL_CLKSTP; 1169 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1170 return -1; 1171 } 1172 else /* was: clksrc=0(auto) */ 1173 clksrc = 3; /* fallback to 40MHz XCLK (OB800) */ 1174 1175 pr_debug("%s: PLL not locked, fallback to clksrc=%d\n", 1176 __func__, clksrc); 1177 } 1178 else 1179 clksrc = 1; /* got successful PLL lock */ 1180 } 1181 1182 if (clksrc != 1) { 1183 /* we get here if either no PLL detected in auto-mode or 1184 an external clock source was explicitly specified */ 1185 1186 clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP; 1187 if (clksrc == 3) 1188 clkctl |= CLKCTL_XCKSEL; 1189 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1190 1191 /* no way to test for working XCLK */ 1192 } 1193 else 1194 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl); 1195 1196 /* ok, now going to connect the chip with the clock source */ 1197 1198 clkctl &= ~CLKCTL_CLKSTP; 1199 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1200 1201 return 0; 1202} 1203 1204static void vlsi_stop_clock(struct pci_dev *pdev) 1205{ 1206 u8 clkctl; 1207 1208 /* disconnect chip from clock source */ 1209 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl); 1210 clkctl |= CLKCTL_CLKSTP; 1211 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1212 1213 /* disable all clock sources */ 1214 clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV); 1215 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl); 1216} 1217 1218/********************************************************/ 1219 1220/* writing all-zero to the VLSI PCI IO register area seems to prevent 1221 * some occasional situations where the hardware fails (symptoms are 1222 * what appears as stalled tx/rx state machines, i.e. everything ok for 1223 * receive or transmit but hw makes no progress or is unable to access 1224 * the bus memory locations). 1225 * Best place to call this is immediately after/before the internal clock 1226 * gets started/stopped. 1227 */ 1228 1229static inline void vlsi_clear_regs(unsigned iobase) 1230{ 1231 unsigned i; 1232 const unsigned chip_io_extent = 32; 1233 1234 for (i = 0; i < chip_io_extent; i += sizeof(u16)) 1235 outw(0, iobase + i); 1236} 1237 1238static int vlsi_init_chip(struct pci_dev *pdev) 1239{ 1240 struct net_device *ndev = pci_get_drvdata(pdev); 1241 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1242 unsigned iobase; 1243 u16 ptr; 1244 1245 /* start the clock and clean the registers */ 1246 1247 if (vlsi_start_clock(pdev)) { 1248 net_err_ratelimited("%s: no valid clock source\n", __func__); 1249 return -1; 1250 } 1251 iobase = ndev->base_addr; 1252 vlsi_clear_regs(iobase); 1253 1254 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */ 1255 1256 outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */ 1257 1258 /* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */ 1259 1260 outw(0, iobase+VLSI_PIO_IRCFG); 1261 wmb(); 1262 1263 outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */ 1264 1265 outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE); 1266 1267 outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size), 1268 iobase+VLSI_PIO_RINGSIZE); 1269 1270 ptr = inw(iobase+VLSI_PIO_RINGPTR); 1271 atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr)); 1272 atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr)); 1273 atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr)); 1274 atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr)); 1275 1276 vlsi_set_baud(idev, iobase); /* idev->new_baud used as provided by caller */ 1277 1278 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */ 1279 wmb(); 1280 1281 /* DO NOT BLINDLY ENABLE IRINTR_ACTEN! 1282 * basically every received pulse fires an ACTIVITY-INT 1283 * leading to >>1000 INT's per second instead of few 10 1284 */ 1285 1286 outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR); 1287 1288 return 0; 1289} 1290 1291static int vlsi_start_hw(vlsi_irda_dev_t *idev) 1292{ 1293 struct pci_dev *pdev = idev->pdev; 1294 struct net_device *ndev = pci_get_drvdata(pdev); 1295 unsigned iobase = ndev->base_addr; 1296 u8 byte; 1297 1298 /* we don't use the legacy UART, disable its address decoding */ 1299 1300 pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte); 1301 byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST); 1302 pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte); 1303 1304 /* enable PCI busmaster access to our 16MB page */ 1305 1306 pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE); 1307 pci_set_master(pdev); 1308 1309 if (vlsi_init_chip(pdev) < 0) { 1310 pci_disable_device(pdev); 1311 return -1; 1312 } 1313 1314 vlsi_fill_rx(idev->rx_ring); 1315 1316 idev->last_rx = ktime_get(); /* first mtt may start from now on */ 1317 1318 outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */ 1319 1320 return 0; 1321} 1322 1323static int vlsi_stop_hw(vlsi_irda_dev_t *idev) 1324{ 1325 struct pci_dev *pdev = idev->pdev; 1326 struct net_device *ndev = pci_get_drvdata(pdev); 1327 unsigned iobase = ndev->base_addr; 1328 unsigned long flags; 1329 1330 spin_lock_irqsave(&idev->lock,flags); 1331 outw(0, iobase+VLSI_PIO_IRENABLE); 1332 outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */ 1333 1334 /* disable and w/c irqs */ 1335 outb(0, iobase+VLSI_PIO_IRINTR); 1336 wmb(); 1337 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); 1338 spin_unlock_irqrestore(&idev->lock,flags); 1339 1340 vlsi_unarm_tx(idev); 1341 vlsi_unarm_rx(idev); 1342 1343 vlsi_clear_regs(iobase); 1344 vlsi_stop_clock(pdev); 1345 1346 pci_disable_device(pdev); 1347 1348 return 0; 1349} 1350 1351/**************************************************************/ 1352 1353static void vlsi_tx_timeout(struct net_device *ndev) 1354{ 1355 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1356 1357 1358 vlsi_reg_debug(ndev->base_addr, __func__); 1359 vlsi_ring_debug(idev->tx_ring); 1360 1361 if (netif_running(ndev)) 1362 netif_stop_queue(ndev); 1363 1364 vlsi_stop_hw(idev); 1365 1366 /* now simply restart the whole thing */ 1367 1368 if (!idev->new_baud) 1369 idev->new_baud = idev->baud; /* keep current baudrate */ 1370 1371 if (vlsi_start_hw(idev)) 1372 net_err_ratelimited("%s: failed to restart hw - %s(%s) unusable!\n", 1373 __func__, pci_name(idev->pdev), ndev->name); 1374 else 1375 netif_start_queue(ndev); 1376} 1377 1378static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 1379{ 1380 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1381 struct if_irda_req *irq = (struct if_irda_req *) rq; 1382 unsigned long flags; 1383 u16 fifocnt; 1384 int ret = 0; 1385 1386 switch (cmd) { 1387 case SIOCSBANDWIDTH: 1388 if (!capable(CAP_NET_ADMIN)) { 1389 ret = -EPERM; 1390 break; 1391 } 1392 spin_lock_irqsave(&idev->lock, flags); 1393 idev->new_baud = irq->ifr_baudrate; 1394 /* when called from userland there might be a minor race window here 1395 * if the stack tries to change speed concurrently - which would be 1396 * pretty strange anyway with the userland having full control... 1397 */ 1398 vlsi_set_baud(idev, ndev->base_addr); 1399 spin_unlock_irqrestore(&idev->lock, flags); 1400 break; 1401 case SIOCSMEDIABUSY: 1402 if (!capable(CAP_NET_ADMIN)) { 1403 ret = -EPERM; 1404 break; 1405 } 1406 irda_device_set_media_busy(ndev, TRUE); 1407 break; 1408 case SIOCGRECEIVING: 1409 /* the best we can do: check whether there are any bytes in rx fifo. 1410 * The trustable window (in case some data arrives just afterwards) 1411 * may be as short as 1usec or so at 4Mbps. 1412 */ 1413 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK; 1414 irq->ifr_receiving = (fifocnt!=0) ? 1 : 0; 1415 break; 1416 default: 1417 net_warn_ratelimited("%s: notsupp - cmd=%04x\n", 1418 __func__, cmd); 1419 ret = -EOPNOTSUPP; 1420 } 1421 1422 return ret; 1423} 1424 1425/********************************************************/ 1426 1427static irqreturn_t vlsi_interrupt(int irq, void *dev_instance) 1428{ 1429 struct net_device *ndev = dev_instance; 1430 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1431 unsigned iobase; 1432 u8 irintr; 1433 int boguscount = 5; 1434 unsigned long flags; 1435 int handled = 0; 1436 1437 iobase = ndev->base_addr; 1438 spin_lock_irqsave(&idev->lock,flags); 1439 do { 1440 irintr = inb(iobase+VLSI_PIO_IRINTR); 1441 mb(); 1442 outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */ 1443 1444 if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */ 1445 break; 1446 1447 handled = 1; 1448 1449 if (unlikely(!(irintr & ~IRINTR_ACTIVITY))) 1450 break; /* nothing todo if only activity */ 1451 1452 if (irintr&IRINTR_RPKTINT) 1453 vlsi_rx_interrupt(ndev); 1454 1455 if (irintr&IRINTR_TPKTINT) 1456 vlsi_tx_interrupt(ndev); 1457 1458 } while (--boguscount > 0); 1459 spin_unlock_irqrestore(&idev->lock,flags); 1460 1461 if (boguscount <= 0) 1462 net_info_ratelimited("%s: too much work in interrupt!\n", 1463 __func__); 1464 return IRQ_RETVAL(handled); 1465} 1466 1467/********************************************************/ 1468 1469static int vlsi_open(struct net_device *ndev) 1470{ 1471 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1472 int err = -EAGAIN; 1473 char hwname[32]; 1474 1475 if (pci_request_regions(idev->pdev, drivername)) { 1476 net_warn_ratelimited("%s: io resource busy\n", __func__); 1477 goto errout; 1478 } 1479 ndev->base_addr = pci_resource_start(idev->pdev,0); 1480 ndev->irq = idev->pdev->irq; 1481 1482 /* under some rare occasions the chip apparently comes up with 1483 * IRQ's pending. We better w/c pending IRQ and disable them all 1484 */ 1485 1486 outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR); 1487 1488 if (request_irq(ndev->irq, vlsi_interrupt, IRQF_SHARED, 1489 drivername, ndev)) { 1490 net_warn_ratelimited("%s: couldn't get IRQ: %d\n", 1491 __func__, ndev->irq); 1492 goto errout_io; 1493 } 1494 1495 if ((err = vlsi_create_hwif(idev)) != 0) 1496 goto errout_irq; 1497 1498 sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr); 1499 idev->irlap = irlap_open(ndev,&idev->qos,hwname); 1500 if (!idev->irlap) 1501 goto errout_free_ring; 1502 1503 idev->last_rx = ktime_get(); /* first mtt may start from now on */ 1504 1505 idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */ 1506 1507 if ((err = vlsi_start_hw(idev)) != 0) 1508 goto errout_close_irlap; 1509 1510 netif_start_queue(ndev); 1511 1512 net_info_ratelimited("%s: device %s operational\n", 1513 __func__, ndev->name); 1514 1515 return 0; 1516 1517errout_close_irlap: 1518 irlap_close(idev->irlap); 1519errout_free_ring: 1520 vlsi_destroy_hwif(idev); 1521errout_irq: 1522 free_irq(ndev->irq,ndev); 1523errout_io: 1524 pci_release_regions(idev->pdev); 1525errout: 1526 return err; 1527} 1528 1529static int vlsi_close(struct net_device *ndev) 1530{ 1531 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1532 1533 netif_stop_queue(ndev); 1534 1535 if (idev->irlap) 1536 irlap_close(idev->irlap); 1537 idev->irlap = NULL; 1538 1539 vlsi_stop_hw(idev); 1540 1541 vlsi_destroy_hwif(idev); 1542 1543 free_irq(ndev->irq,ndev); 1544 1545 pci_release_regions(idev->pdev); 1546 1547 net_info_ratelimited("%s: device %s stopped\n", __func__, ndev->name); 1548 1549 return 0; 1550} 1551 1552static const struct net_device_ops vlsi_netdev_ops = { 1553 .ndo_open = vlsi_open, 1554 .ndo_stop = vlsi_close, 1555 .ndo_start_xmit = vlsi_hard_start_xmit, 1556 .ndo_do_ioctl = vlsi_ioctl, 1557 .ndo_tx_timeout = vlsi_tx_timeout, 1558}; 1559 1560static int vlsi_irda_init(struct net_device *ndev) 1561{ 1562 vlsi_irda_dev_t *idev = netdev_priv(ndev); 1563 struct pci_dev *pdev = idev->pdev; 1564 1565 ndev->irq = pdev->irq; 1566 ndev->base_addr = pci_resource_start(pdev,0); 1567 1568 /* PCI busmastering 1569 * see include file for details why we need these 2 masks, in this order! 1570 */ 1571 1572 if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW) || 1573 pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) { 1574 net_err_ratelimited("%s: aborting due to PCI BM-DMA address limitations\n", 1575 __func__); 1576 return -1; 1577 } 1578 1579 irda_init_max_qos_capabilies(&idev->qos); 1580 1581 /* the VLSI82C147 does not support 576000! */ 1582 1583 idev->qos.baud_rate.bits = IR_2400 | IR_9600 1584 | IR_19200 | IR_38400 | IR_57600 | IR_115200 1585 | IR_1152000 | (IR_4000000 << 8); 1586 1587 idev->qos.min_turn_time.bits = qos_mtt_bits; 1588 1589 irda_qos_bits_to_value(&idev->qos); 1590 1591 /* currently no public media definitions for IrDA */ 1592 1593 ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA; 1594 ndev->if_port = IF_PORT_UNKNOWN; 1595 1596 ndev->netdev_ops = &vlsi_netdev_ops; 1597 ndev->watchdog_timeo = 500*HZ/1000; /* max. allowed turn time for IrLAP */ 1598 1599 SET_NETDEV_DEV(ndev, &pdev->dev); 1600 1601 return 0; 1602} 1603 1604/**************************************************************/ 1605 1606static int 1607vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1608{ 1609 struct net_device *ndev; 1610 vlsi_irda_dev_t *idev; 1611 1612 if (pci_enable_device(pdev)) 1613 goto out; 1614 else 1615 pdev->current_state = 0; /* hw must be running now */ 1616 1617 net_info_ratelimited("%s: IrDA PCI controller %s detected\n", 1618 drivername, pci_name(pdev)); 1619 1620 if ( !pci_resource_start(pdev,0) || 1621 !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) { 1622 net_err_ratelimited("%s: bar 0 invalid", __func__); 1623 goto out_disable; 1624 } 1625 1626 ndev = alloc_irdadev(sizeof(*idev)); 1627 if (ndev==NULL) { 1628 net_err_ratelimited("%s: Unable to allocate device memory.\n", 1629 __func__); 1630 goto out_disable; 1631 } 1632 1633 idev = netdev_priv(ndev); 1634 1635 spin_lock_init(&idev->lock); 1636 mutex_init(&idev->mtx); 1637 mutex_lock(&idev->mtx); 1638 idev->pdev = pdev; 1639 1640 if (vlsi_irda_init(ndev) < 0) 1641 goto out_freedev; 1642 1643 if (register_netdev(ndev) < 0) { 1644 net_err_ratelimited("%s: register_netdev failed\n", __func__); 1645 goto out_freedev; 1646 } 1647 1648 if (vlsi_proc_root != NULL) { 1649 struct proc_dir_entry *ent; 1650 1651 ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO, 1652 vlsi_proc_root, VLSI_PROC_FOPS, ndev); 1653 if (!ent) { 1654 net_warn_ratelimited("%s: failed to create proc entry\n", 1655 __func__); 1656 } else { 1657 proc_set_size(ent, 0); 1658 } 1659 idev->proc_entry = ent; 1660 } 1661 net_info_ratelimited("%s: registered device %s\n", 1662 drivername, ndev->name); 1663 1664 pci_set_drvdata(pdev, ndev); 1665 mutex_unlock(&idev->mtx); 1666 1667 return 0; 1668 1669out_freedev: 1670 mutex_unlock(&idev->mtx); 1671 free_netdev(ndev); 1672out_disable: 1673 pci_disable_device(pdev); 1674out: 1675 return -ENODEV; 1676} 1677 1678static void vlsi_irda_remove(struct pci_dev *pdev) 1679{ 1680 struct net_device *ndev = pci_get_drvdata(pdev); 1681 vlsi_irda_dev_t *idev; 1682 1683 if (!ndev) { 1684 net_err_ratelimited("%s: lost netdevice?\n", drivername); 1685 return; 1686 } 1687 1688 unregister_netdev(ndev); 1689 1690 idev = netdev_priv(ndev); 1691 mutex_lock(&idev->mtx); 1692 if (idev->proc_entry) { 1693 remove_proc_entry(ndev->name, vlsi_proc_root); 1694 idev->proc_entry = NULL; 1695 } 1696 mutex_unlock(&idev->mtx); 1697 1698 free_netdev(ndev); 1699 1700 net_info_ratelimited("%s: %s removed\n", drivername, pci_name(pdev)); 1701} 1702 1703#ifdef CONFIG_PM 1704 1705/* The Controller doesn't provide PCI PM capabilities as defined by PCI specs. 1706 * Some of the Linux PCI-PM code however depends on this, for example in 1707 * pci_set_power_state(). So we have to take care to perform the required 1708 * operations on our own (particularly reflecting the pdev->current_state) 1709 * otherwise we might get cheated by pci-pm. 1710 */ 1711 1712 1713static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state) 1714{ 1715 struct net_device *ndev = pci_get_drvdata(pdev); 1716 vlsi_irda_dev_t *idev; 1717 1718 if (!ndev) { 1719 net_err_ratelimited("%s - %s: no netdevice\n", 1720 __func__, pci_name(pdev)); 1721 return 0; 1722 } 1723 idev = netdev_priv(ndev); 1724 mutex_lock(&idev->mtx); 1725 if (pdev->current_state != 0) { /* already suspended */ 1726 if (state.event > pdev->current_state) { /* simply go deeper */ 1727 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1728 pdev->current_state = state.event; 1729 } 1730 else 1731 net_err_ratelimited("%s - %s: invalid suspend request %u -> %u\n", 1732 __func__, pci_name(pdev), 1733 pdev->current_state, state.event); 1734 mutex_unlock(&idev->mtx); 1735 return 0; 1736 } 1737 1738 if (netif_running(ndev)) { 1739 netif_device_detach(ndev); 1740 vlsi_stop_hw(idev); 1741 pci_save_state(pdev); 1742 if (!idev->new_baud) 1743 /* remember speed settings to restore on resume */ 1744 idev->new_baud = idev->baud; 1745 } 1746 1747 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 1748 pdev->current_state = state.event; 1749 idev->resume_ok = 1; 1750 mutex_unlock(&idev->mtx); 1751 return 0; 1752} 1753 1754static int vlsi_irda_resume(struct pci_dev *pdev) 1755{ 1756 struct net_device *ndev = pci_get_drvdata(pdev); 1757 vlsi_irda_dev_t *idev; 1758 1759 if (!ndev) { 1760 net_err_ratelimited("%s - %s: no netdevice\n", 1761 __func__, pci_name(pdev)); 1762 return 0; 1763 } 1764 idev = netdev_priv(ndev); 1765 mutex_lock(&idev->mtx); 1766 if (pdev->current_state == 0) { 1767 mutex_unlock(&idev->mtx); 1768 net_warn_ratelimited("%s - %s: already resumed\n", 1769 __func__, pci_name(pdev)); 1770 return 0; 1771 } 1772 1773 pci_set_power_state(pdev, PCI_D0); 1774 pdev->current_state = PM_EVENT_ON; 1775 1776 if (!idev->resume_ok) { 1777 /* should be obsolete now - but used to happen due to: 1778 * - pci layer initially setting pdev->current_state = 4 (unknown) 1779 * - pci layer did not walk the save_state-tree (might be APM problem) 1780 * so we could not refuse to suspend from undefined state 1781 * - vlsi_irda_suspend detected invalid state and refused to save 1782 * configuration for resume - but was too late to stop suspending 1783 * - vlsi_irda_resume got screwed when trying to resume from garbage 1784 * 1785 * now we explicitly set pdev->current_state = 0 after enabling the 1786 * device and independently resume_ok should catch any garbage config. 1787 */ 1788 net_warn_ratelimited("%s - hm, nothing to resume?\n", __func__); 1789 mutex_unlock(&idev->mtx); 1790 return 0; 1791 } 1792 1793 if (netif_running(ndev)) { 1794 pci_restore_state(pdev); 1795 vlsi_start_hw(idev); 1796 netif_device_attach(ndev); 1797 } 1798 idev->resume_ok = 0; 1799 mutex_unlock(&idev->mtx); 1800 return 0; 1801} 1802 1803#endif /* CONFIG_PM */ 1804 1805/*********************************************************/ 1806 1807static struct pci_driver vlsi_irda_driver = { 1808 .name = drivername, 1809 .id_table = vlsi_irda_table, 1810 .probe = vlsi_irda_probe, 1811 .remove = vlsi_irda_remove, 1812#ifdef CONFIG_PM 1813 .suspend = vlsi_irda_suspend, 1814 .resume = vlsi_irda_resume, 1815#endif 1816}; 1817 1818#define PROC_DIR ("driver/" DRIVER_NAME) 1819 1820static int __init vlsi_mod_init(void) 1821{ 1822 int i, ret; 1823 1824 if (clksrc < 0 || clksrc > 3) { 1825 net_err_ratelimited("%s: invalid clksrc=%d\n", 1826 drivername, clksrc); 1827 return -1; 1828 } 1829 1830 for (i = 0; i < 2; i++) { 1831 switch(ringsize[i]) { 1832 case 4: 1833 case 8: 1834 case 16: 1835 case 32: 1836 case 64: 1837 break; 1838 default: 1839 net_warn_ratelimited("%s: invalid %s ringsize %d, using default=8\n", 1840 drivername, 1841 i ? "rx" : "tx", 1842 ringsize[i]); 1843 ringsize[i] = 8; 1844 break; 1845 } 1846 } 1847 1848 sirpulse = !!sirpulse; 1849 1850 /* proc_mkdir returns NULL if !CONFIG_PROC_FS. 1851 * Failure to create the procfs entry is handled like running 1852 * without procfs - it's not required for the driver to work. 1853 */ 1854 vlsi_proc_root = proc_mkdir(PROC_DIR, NULL); 1855 1856 ret = pci_register_driver(&vlsi_irda_driver); 1857 1858 if (ret && vlsi_proc_root) 1859 remove_proc_entry(PROC_DIR, NULL); 1860 return ret; 1861 1862} 1863 1864static void __exit vlsi_mod_exit(void) 1865{ 1866 pci_unregister_driver(&vlsi_irda_driver); 1867 if (vlsi_proc_root) 1868 remove_proc_entry(PROC_DIR, NULL); 1869} 1870 1871module_init(vlsi_mod_init); 1872module_exit(vlsi_mod_exit); 1873