Lines matching references to "p" in the ni65 LANCE driver (ni65.c)
112 #define PORT p->cmdr_addr
254 static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
260 static void ni65_free_buffer(struct priv *p);
271 static void ni65_set_performance(struct priv *p) in ni65_set_performance() argument
275 if( !(cards[p->cardno].config & 0x02) ) in ni65_set_performance()
296 struct priv *p = dev->ml_priv; in ni65_open() local
298 cards[p->cardno].cardname,dev); in ni65_open()
322 struct priv *p = dev->ml_priv; in ni65_close() local
333 if(p->tmd_skb[i]) { in ni65_close()
334 dev_kfree_skb(p->tmd_skb[i]); in ni65_close()
335 p->tmd_skb[i] = NULL; in ni65_close()
346 struct priv *p = dev->ml_priv; in cleanup_card() local
349 release_region(dev->base_addr, cards[p->cardno].total_size); in cleanup_card()
350 ni65_free_buffer(p); in cleanup_card()
421 struct priv *p; in ni65_probe1() local
456 p = dev->ml_priv; in ni65_probe1()
457 p->cmdr_addr = ioaddr + cards[i].cmd_offset; in ni65_probe1()
458 p->cardno = i; in ni65_probe1()
459 spin_lock_init(&p->ring_lock); in ni65_probe1()
461 printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr); in ni65_probe1()
467 ni65_free_buffer(p); in ni65_probe1()
468 release_region(ioaddr, cards[p->cardno].total_size); in ni65_probe1()
480 p->features = INIT_RING_BEFORE_START; in ni65_probe1()
484 p->features = 0x0; in ni65_probe1()
509 ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */ in ni65_probe1()
522 ni65_free_buffer(p); in ni65_probe1()
523 release_region(ioaddr, cards[p->cardno].total_size); in ni65_probe1()
536 ni65_init_lance(p,dev->dev_addr,0,0); in ni65_probe1()
544 ni65_free_buffer(p); in ni65_probe1()
545 release_region(ioaddr, cards[p->cardno].total_size); in ni65_probe1()
554 if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0) in ni65_probe1()
557 ni65_free_buffer(p); in ni65_probe1()
558 release_region(ioaddr, cards[p->cardno].total_size); in ni65_probe1()
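
The four free_buffer/release_region pairs above (lines 467/468, 522/523, 544/545, 557/558) are ni65_probe1()'s error unwind: once the I/O region and the ring buffers have been claimed, every later failure must undo both, in reverse order of acquisition. A minimal sketch of the same unwind in the kernel's usual goto-ladder style; probe_hw() and the cards[0] index are hypothetical stand-ins, not the driver's code.

static int sketch_probe1(struct net_device *dev, int ioaddr)
{
	int err;

	if (!request_region(ioaddr, cards[0].total_size, "ni65"))
		return -EBUSY;

	err = ni65_alloc_buffer(dev);		/* rings + bounce buffers */
	if (err)
		goto out_region;

	err = probe_hw(dev);			/* hypothetical hardware checks */
	if (err)
		goto out_buffer;
	return 0;

out_buffer:
	ni65_free_buffer(dev->ml_priv);		/* undo in reverse order */
out_region:
	release_region(ioaddr, cards[0].total_size);
	return err;
}
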
572 static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode) in ni65_init_lance() argument
580 p->ib.eaddr[i] = daddr[i]; in ni65_init_lance()
583 p->ib.filter[i] = filter; in ni65_init_lance()
584 p->ib.mode = mode; in ni65_init_lance()
586 p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK; in ni65_init_lance()
587 p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK; in ni65_init_lance()
589 pib = (u32) isa_virt_to_bus(&p->ib); in ni65_init_lance()
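
Lines 586/587 pack the ring base address and the ring size into one 32-bit word: on the Am7990 LANCE, the top three bits of the TDRA/RDRA longwords in the init block hold log2 of the descriptor count, which is what TMDNUMMASK and RMDNUMMASK encode. A hedged sketch of the layout ni65_init_lance() fills in; the field names mirror the driver's p->ib, and the addresses and a four-entry ring are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

struct sketch_init_block {		/* Am7990 init block, as in p->ib */
	uint16_t mode;			/* mode word, e.g. M_PROM */
	uint8_t  eaddr[6];		/* station (MAC) address */
	uint8_t  filter[8];		/* 64-bit multicast hash filter */
	uint32_t rrp;			/* RDRA: rx ring addr | log2(RMDNUM) << 29 */
	uint32_t trp;			/* TDRA: tx ring addr | log2(TMDNUM) << 29 */
};

static uint32_t ring_ptr(uint32_t bus_addr, unsigned log2_entries)
{
	return bus_addr | (uint32_t)log2_entries << 29;
}

int main(void)
{
	/* assuming four descriptors -> length code log2(4) == 2 */
	printf("trp = %#010x\n", (unsigned)ring_ptr(0x000d0000u, 2));
	return 0;
}
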
644 struct priv *p; in ni65_alloc_buffer() local
654 p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7); in ni65_alloc_buffer()
655 memset((char *)p, 0, sizeof(struct priv)); in ni65_alloc_buffer()
656 p->self = ptr; in ni65_alloc_buffer()
661 p->tmd_skb[i] = NULL; in ni65_alloc_buffer()
663 p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0); in ni65_alloc_buffer()
664 if(!p->tmdbounce[i]) { in ni65_alloc_buffer()
665 ni65_free_buffer(p); in ni65_alloc_buffer()
673 p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1); in ni65_alloc_buffer()
674 if(!p->recv_skb[i]) { in ni65_alloc_buffer()
675 ni65_free_buffer(p); in ni65_alloc_buffer()
679 p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0); in ni65_alloc_buffer()
680 if(!p->recvbounce[i]) { in ni65_alloc_buffer()
681 ni65_free_buffer(p); in ni65_alloc_buffer()
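
Line 654 is a manual alignment idiom: the raw allocation is rounded up to the next 8-byte boundary before being used as struct priv, and line 656 stores the original pointer in p->self so ni65_free_buffer() can kfree() it later (line 717). A minimal user-space sketch of the same trick; struct sketch_priv is a stand-in for the driver's struct priv.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sketch_priv {
	void *self;		/* original pointer, the one free() must see */
	/* ... ring state would follow ... */
};

static struct sketch_priv *alloc_aligned(void)
{
	void *ptr = malloc(sizeof(struct sketch_priv) + 8);
	struct sketch_priv *p;

	if (!ptr)
		return NULL;
	p = (struct sketch_priv *)(((uintptr_t)ptr + 7) & ~(uintptr_t)7);
	memset(p, 0, sizeof(*p));
	p->self = ptr;		/* free(p->self), never free(p) */
	return p;
}

int main(void)
{
	struct sketch_priv *p = alloc_aligned();

	if (p) {
		printf("raw %p -> aligned %p\n", p->self, (void *)p);
		free(p->self);
	}
	return 0;
}
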
693 static void ni65_free_buffer(struct priv *p) in ni65_free_buffer() argument
697 if(!p) in ni65_free_buffer()
701 kfree(p->tmdbounce[i]); in ni65_free_buffer()
703 if(p->tmd_skb[i]) in ni65_free_buffer()
704 dev_kfree_skb(p->tmd_skb[i]); in ni65_free_buffer()
711 if(p->recv_skb[i]) in ni65_free_buffer()
712 dev_kfree_skb(p->recv_skb[i]); in ni65_free_buffer()
714 kfree(p->recvbounce[i]); in ni65_free_buffer()
717 kfree(p->self); in ni65_free_buffer()
724 static void ni65_stop_start(struct net_device *dev,struct priv *p) in ni65_stop_start() argument
733 if(p->features & INIT_RING_BEFORE_START) { in ni65_stop_start()
741 if(p->xmit_queued) { in ni65_stop_start()
743 if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN)) in ni65_stop_start()
745 p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1); in ni65_stop_start()
746 if(p->tmdlast == p->tmdnum) in ni65_stop_start()
752 struct tmd *tmdp = p->tmdhead + i; in ni65_stop_start()
754 skb_save[i] = p->tmd_skb[i]; in ni65_stop_start()
762 struct rmd *rmdp = p->rmdhead + i; in ni65_stop_start()
765 p->tmdnum = p->xmit_queued = 0; in ni65_stop_start()
769 int num = (i + p->tmdlast) & (TMDNUM-1); in ni65_stop_start()
770 p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */ in ni65_stop_start()
771 p->tmdhead[i].blen = blen[num]; in ni65_stop_start()
772 if(p->tmdhead[i].u.s.status & XMIT_OWN) { in ni65_stop_start()
773 p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1); in ni65_stop_start()
774 p->xmit_queued = 1; in ni65_stop_start()
778 p->tmd_skb[i] = skb_save[num]; in ni65_stop_start()
781 p->rmdnum = p->tmdlast = 0; in ni65_stop_start()
782 if(!p->lock) in ni65_stop_start()
783 if (p->tmdnum || !p->xmit_queued) in ni65_stop_start()
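
Every index update above, e.g. (p->tmdlast + 1) & (TMDNUM-1) on line 745, wraps the ring index with a bitmask instead of a modulo. That only works because the ring sizes are powers of two. A runnable illustration, assuming a four-entry transmit ring:

#include <stdio.h>

#define TMDNUM 4	/* must be a power of two for the mask to wrap */

int main(void)
{
	unsigned idx = 0, i;

	for (i = 0; i < 6; i++) {
		printf("%u ", idx);
		idx = (idx + 1) & (TMDNUM - 1);	/* 0 1 2 3 0 1 ... */
	}
	printf("\n");
	return 0;
}
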
797 struct priv *p = dev->ml_priv; in ni65_lance_reinit() local
800 p->lock = 0; in ni65_lance_reinit()
801 p->xmit_queued = 0; in ni65_lance_reinit()
813 cards[p->cardno].cardname,(int) i); in ni65_lance_reinit()
820 p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0; in ni65_lance_reinit()
823 struct tmd *tmdp = p->tmdhead + i; in ni65_lance_reinit()
825 if(p->tmd_skb[i]) { in ni65_lance_reinit()
826 dev_kfree_skb(p->tmd_skb[i]); in ni65_lance_reinit()
827 p->tmd_skb[i] = NULL; in ni65_lance_reinit()
837 struct rmd *rmdp = p->rmdhead + i; in ni65_lance_reinit()
839 rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data); in ni65_lance_reinit()
841 rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]); in ni65_lance_reinit()
849 ni65_init_lance(p,dev->dev_addr,0x00,M_PROM); in ni65_lance_reinit()
851 ni65_init_lance(p,dev->dev_addr,0xff,0x0); in ni65_lance_reinit()
853 ni65_init_lance(p,dev->dev_addr,0x00,0x00); in ni65_lance_reinit()
861 ni65_set_performance(p); in ni65_lance_reinit()
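
Lines 849-853 cover the three receive policies passed to ni65_init_lance(): promiscuous mode sets M_PROM and leaves the hash filter empty, "all multicast" opens the 64-bit logical address filter completely (every filter byte 0xff), and normal operation clears both. A sketch of the same decision, using the Am7990's PROM mode bit (0x8000) for illustration; names other than M_PROM are hypothetical.

#include <stdint.h>
#include <string.h>

#define M_PROM 0x8000	/* Am7990 mode word: promiscuous receive */

struct sketch_filter {
	uint16_t mode;
	uint8_t  filter[8];	/* 64-bit multicast hash filter */
};

static void pick_filter(struct sketch_filter *f, int promisc, int allmulti)
{
	if (promisc) {
		f->mode = M_PROM;		/* PROM bypasses the filter */
		memset(f->filter, 0x00, 8);
	} else if (allmulti) {
		f->mode = 0;
		memset(f->filter, 0xff, 8);	/* accept every multicast */
	} else {
		f->mode = 0;
		memset(f->filter, 0x00, 8);	/* unicast + broadcast only */
	}
}

int main(void)
{
	struct sketch_filter f;

	pick_filter(&f, 1, 0);
	return f.mode == M_PROM ? 0 : 1;
}
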
880 struct priv *p; in ni65_interrupt() local
883 p = dev->ml_priv; in ni65_interrupt()
885 spin_lock(&p->ring_lock); in ni65_interrupt()
913 printk("%02x ",p->rmdhead[i].u.s.status); in ni65_interrupt()
920 ni65_stop_start(dev,p); in ni65_interrupt()
932 num2 = (p->rmdnum + i) & (RMDNUM-1); in ni65_interrupt()
933 if(!(p->rmdhead[num2].u.s.status & RCV_OWN)) in ni65_interrupt()
940 num1 = (p->rmdnum + k) & (RMDNUM-1); in ni65_interrupt()
941 if(!(p->rmdhead[num1].u.s.status & RCV_OWN)) in ni65_interrupt()
952 sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */ in ni65_interrupt()
956 printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf); in ni65_interrupt()
959 p->rmdnum = num1; in ni65_interrupt()
961 if((p->rmdhead[num2].u.s.status & RCV_OWN)) in ni65_interrupt()
972 ni65_stop_start(dev,p); in ni65_interrupt()
977 spin_unlock(&p->ring_lock); in ni65_interrupt()
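
Note the two lock flavors on ring_lock: ni65_interrupt() takes plain spin_lock() (line 885) because a hard-IRQ handler already runs with that interrupt blocked, while ni65_send_packet() uses spin_lock_irqsave() (lines 1187/1195) so the handler cannot fire and spin on the same lock mid-transmit. A pattern extract under the driver's own types (not a complete module); the function names are hypothetical.

static irqreturn_t sketch_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct priv *p = dev->ml_priv;

	spin_lock(&p->ring_lock);	/* IRQ context: no need to save flags */
	/* ... service receive/transmit rings ... */
	spin_unlock(&p->ring_lock);
	return IRQ_HANDLED;
}

static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct priv *p = dev->ml_priv;
	unsigned long flags;

	spin_lock_irqsave(&p->ring_lock, flags);	/* keep the ISR out */
	/* ... fill a transmit descriptor ... */
	spin_unlock_irqrestore(&p->ring_lock, flags);
	return NETDEV_TX_OK;
}
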
987 struct priv *p = dev->ml_priv; in ni65_xmit_intr() local
989 while(p->xmit_queued) in ni65_xmit_intr()
991 struct tmd *tmdp = p->tmdhead + p->tmdlast; in ni65_xmit_intr()
1013 if(p->features & INIT_RING_BEFORE_START) { in ni65_xmit_intr()
1015 ni65_stop_start(dev,p); in ni65_xmit_intr()
1019 ni65_stop_start(dev,p); in ni65_xmit_intr()
1033 if(p->tmd_skb[p->tmdlast]) { in ni65_xmit_intr()
1034 dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]); in ni65_xmit_intr()
1035 p->tmd_skb[p->tmdlast] = NULL; in ni65_xmit_intr()
1039 p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1); in ni65_xmit_intr()
1040 if(p->tmdlast == p->tmdnum) in ni65_xmit_intr()
1041 p->xmit_queued = 0; in ni65_xmit_intr()
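
Lines 989-1041 outline the transmit-reclaim loop: walk the ring from tmdlast, free each completed skb, and clear xmit_queued once tmdlast catches up with tmdnum. The hedged reconstruction below fills in the OWN-bit early exit (inferred from the XMIT_OWN test on line 743); dev_kfree_skb_irq() is the free variant that is safe in hard-IRQ context.

/* reconstruction, not the driver's exact code */
while (p->xmit_queued) {
	struct tmd *tmdp = p->tmdhead + p->tmdlast;

	if (tmdp->u.s.status & XMIT_OWN)	/* chip still owns it: stop */
		break;
	if (p->tmd_skb[p->tmdlast]) {
		dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
		p->tmd_skb[p->tmdlast] = NULL;
	}
	p->tmdlast = (p->tmdlast + 1) & (TMDNUM - 1);
	if (p->tmdlast == p->tmdnum)		/* ring fully drained */
		p->xmit_queued = 0;
}
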
1054 struct priv *p = dev->ml_priv; in ni65_recv_intr() local
1056 rmdp = p->rmdhead + p->rmdnum; in ni65_recv_intr()
1100 skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len); in ni65_recv_intr()
1103 struct sk_buff *skb1 = p->recv_skb[p->rmdnum]; in ni65_recv_intr()
1105 p->recv_skb[p->rmdnum] = skb; in ni65_recv_intr()
1112 skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len); in ni65_recv_intr()
1132 p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1); in ni65_recv_intr()
1133 rmdp = p->rmdhead + p->rmdnum; in ni65_recv_intr()
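
Lines 1100-1112 show the two delivery strategies: with skbs on the ring, the frame is either copied into a fresh skb (line 1100) or the ring skb itself is swapped out for a replacement (lines 1103-1105); without ring skbs, the frame is copied out of the DMA bounce buffer (line 1112). A hedged reconstruction of that bounce-buffer path, per received frame:

/* reconstruction of the bounce-buffer receive path */
static void sketch_rx_one(struct net_device *dev, struct priv *p, int len)
{
	struct sk_buff *skb = dev_alloc_skb(len + 2);

	if (!skb)
		return;				/* drop: no memory for this frame */
	skb_reserve(skb, 2);			/* align the IP header */
	skb_put(skb, len);
	skb_copy_to_linear_data(skb, p->recvbounce[p->rmdnum], len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}
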
1144 struct priv *p = dev->ml_priv; in ni65_timeout() local
1148 printk("%02x ",p->tmdhead[i].u.s.status); in ni65_timeout()
1162 struct priv *p = dev->ml_priv; in ni65_send_packet() local
1166 if (test_and_set_bit(0, (void*)&p->lock)) { in ni65_send_packet()
1180 skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum], in ni65_send_packet()
1184 memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len); in ni65_send_packet()
1187 spin_lock_irqsave(&p->ring_lock, flags); in ni65_send_packet()
1188 tmdp = p->tmdhead + p->tmdnum; in ni65_send_packet()
1189 tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]); in ni65_send_packet()
1190 p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1); in ni65_send_packet()
1195 spin_lock_irqsave(&p->ring_lock, flags); in ni65_send_packet()
1197 tmdp = p->tmdhead + p->tmdnum; in ni65_send_packet()
1199 p->tmd_skb[p->tmdnum] = skb; in ni65_send_packet()
1207 p->xmit_queued = 1; in ni65_send_packet()
1208 p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1); in ni65_send_packet()
1210 if(p->tmdnum != p->tmdlast) in ni65_send_packet()
1213 p->lock = 0; in ni65_send_packet()
1215 spin_unlock_irqrestore(&p->ring_lock, flags); in ni65_send_packet()
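
Lines 1180-1184 copy the outgoing frame into a pre-allocated bounce buffer and zero the tail, i.e. short frames are padded out before DMA. A runnable sketch of that padding, assuming the standard 60-byte Ethernet minimum and a 1536-byte bounce buffer (the driver's actual length computation is on a line not shown here):

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN   60	/* minimum Ethernet frame length without FCS */
#define T_BUF_SIZE 1536	/* assumed size of the transmit bounce buffer */

static unsigned fill_bounce(char *bounce, const char *frame, unsigned len)
{
	unsigned out = len < ETH_ZLEN ? ETH_ZLEN : len;

	memcpy(bounce, frame, len);
	if (out > len)
		memset(bounce + len, 0, out - len);	/* pad short frames */
	return out;
}

int main(void)
{
	char bounce[T_BUF_SIZE];

	printf("%u\n", fill_bounce(bounce, "hello", 5));	/* prints 60 */
	return 0;
}
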