rfd 686 drivers/cdrom/cdrom.c struct rwrt_feature_desc *rfd)
rfd 702 drivers/cdrom/cdrom.c memcpy(rfd, &buffer[sizeof(struct feature_header)], sizeof (*rfd));
rfd 733 drivers/cdrom/cdrom.c struct rwrt_feature_desc rfd;
rfd 738 drivers/cdrom/cdrom.c if ((ret = cdrom_get_random_writable(cdi, &rfd)))
rfd 741 drivers/cdrom/cdrom.c if (CDF_RWRT == be16_to_cpu(rfd.feature_code))
rfd 845 drivers/cdrom/cdrom.c struct rwrt_feature_desc rfd;
rfd 851 drivers/cdrom/cdrom.c if ((ret = cdrom_get_random_writable(cdi, &rfd)))
rfd 853 drivers/cdrom/cdrom.c else if (CDF_RWRT == be16_to_cpu(rfd.feature_code))
rfd 854 drivers/cdrom/cdrom.c ret = !rfd.curr;
rfd 2012 drivers/net/ethernet/agere/et131x.c struct rfd *rfd;
rfd 2020 drivers/net/ethernet/agere/et131x.c rfd = list_entry(rx_ring->recv_list.next,
rfd 2021 drivers/net/ethernet/agere/et131x.c struct rfd, list_node);
rfd 2023 drivers/net/ethernet/agere/et131x.c list_del(&rfd->list_node);
rfd 2024 drivers/net/ethernet/agere/et131x.c rfd->skb = NULL;
rfd 2025 drivers/net/ethernet/agere/et131x.c kfree(rfd);
rfd 2090 drivers/net/ethernet/agere/et131x.c struct rfd *rfd;
rfd 2096 drivers/net/ethernet/agere/et131x.c rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
rfd 2097 drivers/net/ethernet/agere/et131x.c if (!rfd)
rfd 2100 drivers/net/ethernet/agere/et131x.c rfd->skb = NULL;
rfd 2103 drivers/net/ethernet/agere/et131x.c list_add_tail(&rfd->list_node, &rx_ring->recv_list);
rfd 2127 drivers/net/ethernet/agere/et131x.c static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
rfd 2131 drivers/net/ethernet/agere/et131x.c u16 buff_index = rfd->bufferindex;
rfd 2132 drivers/net/ethernet/agere/et131x.c u8 ring_index = rfd->ringindex;
rfd 2172 drivers/net/ethernet/agere/et131x.c list_add_tail(&rfd->list_node, &rx_local->recv_list);
rfd 2186 drivers/net/ethernet/agere/et131x.c static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
rfd 2191 drivers/net/ethernet/agere/et131x.c struct rfd *rfd;
rfd 2249 drivers/net/ethernet/agere/et131x.c rfd = list_entry(element, struct rfd, list_node);
rfd 2251 drivers/net/ethernet/agere/et131x.c if (!rfd) {
rfd 2256 drivers/net/ethernet/agere/et131x.c list_del(&rfd->list_node);
rfd 2261 drivers/net/ethernet/agere/et131x.c rfd->bufferindex = buff_index;
rfd 2262 drivers/net/ethernet/agere/et131x.c rfd->ringindex = ring_index;
rfd 2270 drivers/net/ethernet/agere/et131x.c rfd->len = 0;
rfd 2277 drivers/net/ethernet/agere/et131x.c rfd->len = len;
rfd 2279 drivers/net/ethernet/agere/et131x.c skb = dev_alloc_skb(rfd->len + 2);
rfd 2283 drivers/net/ethernet/agere/et131x.c adapter->netdev->stats.rx_bytes += rfd->len;
rfd 2285 drivers/net/ethernet/agere/et131x.c skb_put_data(skb, fbr->virt[buff_index], rfd->len);
rfd 2292 drivers/net/ethernet/agere/et131x.c nic_return_rfd(adapter, rfd);
rfd 2293 drivers/net/ethernet/agere/et131x.c return rfd;
rfd 2298 drivers/net/ethernet/agere/et131x.c struct rfd *rfd = NULL;
rfd 2315 drivers/net/ethernet/agere/et131x.c rfd = nic_rx_pkts(adapter);
rfd 2317 drivers/net/ethernet/agere/et131x.c if (rfd == NULL)
rfd 2327 drivers/net/ethernet/agere/et131x.c rfd->len == 0)
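The et131x lines above show receive frame descriptors (struct rfd) kept on a software free list: allocated at ring setup, taken from the head of recv_list in nic_rx_pkts() to describe an incoming frame, and re-queued at the tail by nic_return_rfd(). Below is a minimal, standalone userspace sketch of that recycle pattern; the struct fields and the recv_list_fill/take/return helpers are illustrative stand-ins, not the driver's API.

```c
/* Standalone analogue of the recv_list recycling seen in the et131x lines. */
#include <stdio.h>
#include <stdlib.h>

struct rfd {                      /* stand-in for the driver's struct rfd */
	struct rfd *next;
	unsigned int len;
};

struct rx_ring {                  /* stand-in for the recv_list free list */
	struct rfd *head;
	struct rfd **tail;
};

static int recv_list_fill(struct rx_ring *rx, unsigned int nr)
{
	rx->head = NULL;
	rx->tail = &rx->head;
	for (unsigned int i = 0; i < nr; i++) {
		struct rfd *rfd = calloc(1, sizeof(*rfd));  /* kzalloc() analogue */
		if (!rfd)
			return -1;
		*rx->tail = rfd;                            /* list_add_tail() analogue */
		rx->tail = &rfd->next;
	}
	return 0;
}

static struct rfd *recv_list_take(struct rx_ring *rx)   /* nic_rx_pkts() side */
{
	struct rfd *rfd = rx->head;

	if (rfd) {
		rx->head = rfd->next;                       /* list_del() analogue */
		if (!rx->head)
			rx->tail = &rx->head;
		rfd->next = NULL;
	}
	return rfd;
}

static void recv_list_return(struct rx_ring *rx, struct rfd *rfd)  /* nic_return_rfd() side */
{
	rfd->len = 0;
	*rx->tail = rfd;
	rx->tail = &rfd->next;
}

int main(void)
{
	struct rx_ring rx;
	struct rfd *rfd;

	if (recv_list_fill(&rx, 4))
		return 1;
	rfd = recv_list_take(&rx);
	rfd->len = 60;                /* pretend a 60-byte frame used this descriptor */
	printf("rfd len=%u\n", rfd->len);
	recv_list_return(&rx, rfd);
	return 0;
}
```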
rfd 2278 drivers/net/ethernet/amd/xgbe/xgbe-dev.c unsigned int rfa, rfd;
rfd 2285 drivers/net/ethernet/amd/xgbe/xgbe-dev.c rfd = rfa + frame_fifo_size;
rfd 2286 drivers/net/ethernet/amd/xgbe/xgbe-dev.c if (rfd > XGMAC_FLOW_CONTROL_MAX)
rfd 2287 drivers/net/ethernet/amd/xgbe/xgbe-dev.c rfd = XGMAC_FLOW_CONTROL_MAX;
rfd 2323 drivers/net/ethernet/amd/xgbe/xgbe-dev.c rfd = rfa + (frame_fifo_size / 2);
rfd 2330 drivers/net/ethernet/amd/xgbe/xgbe-dev.c rfd = rfa + frame_fifo_size;
rfd 2335 drivers/net/ethernet/amd/xgbe/xgbe-dev.c pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
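Here rfd is the flow-control deactivation threshold, derived from the activation threshold rfa plus room for a frame and clamped to the controller's maximum. A standalone sketch of that arithmetic follows; only the relationship rfd = rfa + frame_fifo_size with a cap comes from the lines above, and the FLOW_CONTROL_MAX value below is a placeholder for the driver's XGMAC_FLOW_CONTROL_MAX.

```c
#include <stdio.h>

/* Placeholder cap; the real XGMAC_FLOW_CONTROL_MAX lives in the driver. */
#define FLOW_CONTROL_MAX 33280U

/*
 * Deactivation threshold: one frame's worth of FIFO above the activation
 * threshold, clamped to the largest value the hardware field can encode
 * (mirrors "rfd = rfa + frame_fifo_size" / "rfd = XGMAC_FLOW_CONTROL_MAX").
 */
static unsigned int calc_rfd(unsigned int rfa, unsigned int frame_fifo_size)
{
	unsigned int rfd = rfa + frame_fifo_size;

	if (rfd > FLOW_CONTROL_MAX)
		rfd = FLOW_CONTROL_MAX;
	return rfd;
}

int main(void)
{
	printf("rfd = %u\n", calc_rfd(4096, 2048));   /* 6144 */
	printf("rfd = %u\n", calc_rfd(32768, 2048));  /* clamped to 33280 */
	return 0;
}
```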
rfd 60 drivers/net/ethernet/atheros/alx/alx.h struct alx_rfd *rfd;
rfd 86 drivers/net/ethernet/atheros/alx/main.c struct alx_rfd *rfd = &rxq->rfd[cur];
rfd 123 drivers/net/ethernet/atheros/alx/main.c rfd->addr = cpu_to_le64(dma);
rfd 642 drivers/net/ethernet/atheros/alx/main.c rxq->rfd = alx->descmem.virt + offset;
rfd 294 drivers/net/ethernet/i825xx/82596.c struct i596_rfd *rfd;
rfd 460 drivers/net/ethernet/i825xx/82596.c struct i596_rfd *rfd;
rfd 470 drivers/net/ethernet/i825xx/82596.c lp->scb.cmd, lp->scb.rfd);
rfd 481 drivers/net/ethernet/i825xx/82596.c rfd = lp->rfd_head;
rfd 482 drivers/net/ethernet/i825xx/82596.c printk(KERN_ERR "rfd_head = %p\n", rfd);
rfd 486 drivers/net/ethernet/i825xx/82596.c rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
rfd 487 drivers/net/ethernet/i825xx/82596.c rfd->count);
rfd 488 drivers/net/ethernet/i825xx/82596.c rfd = rfd->v_next;
rfd 489 drivers/net/ethernet/i825xx/82596.c } while (rfd != lp->rfd_head);
rfd 544 drivers/net/ethernet/i825xx/82596.c struct i596_rfd *rfd;
rfd 575 drivers/net/ethernet/i825xx/82596.c for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
rfd 576 drivers/net/ethernet/i825xx/82596.c rfd->rbd = I596_NULL;
rfd 577 drivers/net/ethernet/i825xx/82596.c rfd->v_next = rfd+1;
rfd 578 drivers/net/ethernet/i825xx/82596.c rfd->v_prev = rfd-1;
rfd 579 drivers/net/ethernet/i825xx/82596.c rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
rfd 580 drivers/net/ethernet/i825xx/82596.c rfd->cmd = CMD_FLEX;
rfd 583 drivers/net/ethernet/i825xx/82596.c lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
rfd 584 drivers/net/ethernet/i825xx/82596.c rfd = lp->rfds;
rfd 585 drivers/net/ethernet/i825xx/82596.c rfd->rbd = lp->rbd_head;
rfd 586 drivers/net/ethernet/i825xx/82596.c rfd->v_prev = lp->rfds + rx_ring_size - 1;
rfd 587 drivers/net/ethernet/i825xx/82596.c rfd = lp->rfds + rx_ring_size - 1;
rfd 588 drivers/net/ethernet/i825xx/82596.c rfd->v_next = lp->rfds;
rfd 589 drivers/net/ethernet/i825xx/82596.c rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
rfd 590 drivers/net/ethernet/i825xx/82596.c rfd->cmd = CMD_EOL|CMD_FLEX;
rfd 609 drivers/net/ethernet/i825xx/82596.c lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
rfd 748 drivers/net/ethernet/i825xx/82596.c struct i596_rfd *rfd;
rfd 755 drivers/net/ethernet/i825xx/82596.c rfd = lp->rfd_head; /* Ref next frame to check */
rfd 757 drivers/net/ethernet/i825xx/82596.c while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
rfd 758 drivers/net/ethernet/i825xx/82596.c if (rfd->rbd == I596_NULL)
rfd 760 drivers/net/ethernet/i825xx/82596.c else if (rfd->rbd == lp->rbd_head->b_addr)
rfd 768 drivers/net/ethernet/i825xx/82596.c rfd, rfd->rbd, rfd->stat));
rfd 770 drivers/net/ethernet/i825xx/82596.c if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
rfd 828 drivers/net/ethernet/i825xx/82596.c dev->name, rfd->stat));
rfd 830 drivers/net/ethernet/i825xx/82596.c if ((rfd->stat) & 0x0001)
rfd 832 drivers/net/ethernet/i825xx/82596.c if ((rfd->stat) & 0x0080)
rfd 834 drivers/net/ethernet/i825xx/82596.c if ((rfd->stat) & 0x0100)
rfd 836 drivers/net/ethernet/i825xx/82596.c if ((rfd->stat) & 0x0200)
rfd 838 drivers/net/ethernet/i825xx/82596.c if ((rfd->stat) & 0x0400)
rfd 840 drivers/net/ethernet/i825xx/82596.c if ((rfd->stat) & 0x0800)
rfd 842 drivers/net/ethernet/i825xx/82596.c if ((rfd->stat) & 0x1000)
rfd 855 drivers/net/ethernet/i825xx/82596.c rfd->rbd = I596_NULL;
rfd 856 drivers/net/ethernet/i825xx/82596.c rfd->stat = 0;
rfd 857 drivers/net/ethernet/i825xx/82596.c rfd->cmd = CMD_EOL|CMD_FLEX;
rfd 858 drivers/net/ethernet/i825xx/82596.c rfd->count = 0;
rfd 862 drivers/net/ethernet/i825xx/82596.c rfd->v_prev->cmd = CMD_FLEX;
rfd 866 drivers/net/ethernet/i825xx/82596.c lp->scb.rfd = rfd->b_next;
rfd 867 drivers/net/ethernet/i825xx/82596.c lp->rfd_head = rfd->v_next;
rfd 868 drivers/net/ethernet/i825xx/82596.c rfd = lp->rfd_head;
rfd 1218 drivers/net/ethernet/i825xx/82596.c lp->scb.rfd = I596_NULL;
rfd 278 drivers/net/ethernet/i825xx/lib82596.c u32 rfd;
rfd 408 drivers/net/ethernet/i825xx/lib82596.c struct i596_rfd *rfd;
rfd 418 drivers/net/ethernet/i825xx/lib82596.c SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
rfd 433 drivers/net/ethernet/i825xx/lib82596.c rfd = lp->rfd_head;
rfd 434 drivers/net/ethernet/i825xx/lib82596.c printk(KERN_DEBUG "rfd_head = %p\n", rfd);
rfd 439 drivers/net/ethernet/i825xx/lib82596.c rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
rfd 440 drivers/net/ethernet/i825xx/lib82596.c SWAP32(rfd->b_next), SWAP32(rfd->rbd),
rfd 441 drivers/net/ethernet/i825xx/lib82596.c SWAP16(rfd->count));
rfd 442 drivers/net/ethernet/i825xx/lib82596.c rfd = rfd->v_next;
rfd 443 drivers/net/ethernet/i825xx/lib82596.c } while (rfd != lp->rfd_head);
rfd 465 drivers/net/ethernet/i825xx/lib82596.c struct i596_rfd *rfd;
rfd 494 drivers/net/ethernet/i825xx/lib82596.c for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
rfd 495 drivers/net/ethernet/i825xx/lib82596.c rfd->rbd = I596_NULL;
rfd 496 drivers/net/ethernet/i825xx/lib82596.c rfd->v_next = rfd+1;
rfd 497 drivers/net/ethernet/i825xx/lib82596.c rfd->v_prev = rfd-1;
rfd 498 drivers/net/ethernet/i825xx/lib82596.c rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
rfd 499 drivers/net/ethernet/i825xx/lib82596.c rfd->cmd = SWAP16(CMD_FLEX);
rfd 502 drivers/net/ethernet/i825xx/lib82596.c dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
rfd 503 drivers/net/ethernet/i825xx/lib82596.c rfd = dma->rfds;
rfd 504 drivers/net/ethernet/i825xx/lib82596.c rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
rfd 505 drivers/net/ethernet/i825xx/lib82596.c rfd->v_prev = dma->rfds + rx_ring_size - 1;
rfd 506 drivers/net/ethernet/i825xx/lib82596.c rfd = dma->rfds + rx_ring_size - 1;
rfd 507 drivers/net/ethernet/i825xx/lib82596.c rfd->v_next = dma->rfds;
rfd 508 drivers/net/ethernet/i825xx/lib82596.c rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
rfd 509 drivers/net/ethernet/i825xx/lib82596.c rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
rfd 546 drivers/net/ethernet/i825xx/lib82596.c dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
rfd 627 drivers/net/ethernet/i825xx/lib82596.c dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
rfd 651 drivers/net/ethernet/i825xx/lib82596.c struct i596_rfd *rfd;
rfd 660 drivers/net/ethernet/i825xx/lib82596.c rfd = lp->rfd_head; /* Ref next frame to check */
rfd 662 drivers/net/ethernet/i825xx/lib82596.c DMA_INV(dev, rfd, sizeof(struct i596_rfd));
rfd 663 drivers/net/ethernet/i825xx/lib82596.c while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
rfd 664 drivers/net/ethernet/i825xx/lib82596.c if (rfd->rbd == I596_NULL)
rfd 666 drivers/net/ethernet/i825xx/lib82596.c else if (rfd->rbd == lp->rbd_head->b_addr) {
rfd 676 drivers/net/ethernet/i825xx/lib82596.c rfd, rfd->rbd, rfd->stat));
rfd 678 drivers/net/ethernet/i825xx/lib82596.c if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
rfd 745 drivers/net/ethernet/i825xx/lib82596.c dev->name, rfd->stat));
rfd 747 drivers/net/ethernet/i825xx/lib82596.c if (rfd->stat & SWAP16(0x0100))
rfd 749 drivers/net/ethernet/i825xx/lib82596.c if (rfd->stat & SWAP16(0x8000))
rfd 751 drivers/net/ethernet/i825xx/lib82596.c if (rfd->stat & SWAP16(0x0001))
rfd 753 drivers/net/ethernet/i825xx/lib82596.c if (rfd->stat & SWAP16(0x0002))
rfd 755 drivers/net/ethernet/i825xx/lib82596.c if (rfd->stat & SWAP16(0x0004))
rfd 757 drivers/net/ethernet/i825xx/lib82596.c if (rfd->stat & SWAP16(0x0008))
rfd 759 drivers/net/ethernet/i825xx/lib82596.c if (rfd->stat & SWAP16(0x0010))
rfd 773 drivers/net/ethernet/i825xx/lib82596.c rfd->rbd = I596_NULL;
rfd 774 drivers/net/ethernet/i825xx/lib82596.c rfd->stat = 0;
rfd 775 drivers/net/ethernet/i825xx/lib82596.c rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
rfd 776 drivers/net/ethernet/i825xx/lib82596.c rfd->count = 0;
rfd 780 drivers/net/ethernet/i825xx/lib82596.c lp->dma->scb.rfd = rfd->b_next;
rfd 781 drivers/net/ethernet/i825xx/lib82596.c lp->rfd_head = rfd->v_next;
rfd 782 drivers/net/ethernet/i825xx/lib82596.c DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));
rfd 786 drivers/net/ethernet/i825xx/lib82596.c rfd->v_prev->cmd = SWAP16(CMD_FLEX);
rfd 787 drivers/net/ethernet/i825xx/lib82596.c DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
rfd 788 drivers/net/ethernet/i825xx/lib82596.c rfd = lp->rfd_head;
rfd 789 drivers/net/ethernet/i825xx/lib82596.c DMA_INV(dev, rfd, sizeof(struct i596_rfd));
rfd 1082 drivers/net/ethernet/i825xx/lib82596.c dma->scb.rfd = I596_NULL;
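The 82596 and lib82596 lines above build a circular ring of receive frame descriptors: each rfd links to its neighbours by virtual and bus address, all carry CMD_FLEX, and the last one carries CMD_EOL so the receive unit stops at the wrap point. A standalone sketch of that wiring follows; the struct, flag values, and fake_bus_addr() are simplified stand-ins, and the wrap fix-up that the driver does after its loop is folded into the loop body here.

```c
#include <stdio.h>
#include <string.h>

#define RX_RING_SIZE 8
#define CMD_EOL  0x8000           /* stand-in flag values, not the driver's */
#define CMD_FLEX 0x0008

struct i596_rfd_sketch {          /* simplified stand-in for struct i596_rfd */
	unsigned short cmd;
	struct i596_rfd_sketch *v_next;   /* virtual-address links */
	struct i596_rfd_sketch *v_prev;
	unsigned int b_next;              /* "bus address" of the next rfd */
};

/* stand-in for virt_to_bus(): use the array index as the bus address */
static unsigned int fake_bus_addr(struct i596_rfd_sketch *base,
				  struct i596_rfd_sketch *p)
{
	return (unsigned int)(p - base);
}

/*
 * Wire the descriptors into a ring: every rfd points at its neighbours,
 * all carry CMD_FLEX, and the last one also carries CMD_EOL so the
 * receive unit stops when it reaches the wrap point.
 */
static void init_rfd_ring(struct i596_rfd_sketch *rfds)
{
	for (int i = 0; i < RX_RING_SIZE; i++) {
		struct i596_rfd_sketch *rfd = &rfds[i];

		rfd->v_next = &rfds[(i + 1) % RX_RING_SIZE];
		rfd->v_prev = &rfds[(i + RX_RING_SIZE - 1) % RX_RING_SIZE];
		rfd->b_next = fake_bus_addr(rfds, rfd->v_next);
		rfd->cmd = CMD_FLEX;
	}
	rfds[RX_RING_SIZE - 1].cmd = CMD_EOL | CMD_FLEX;
}

int main(void)
{
	static struct i596_rfd_sketch rfds[RX_RING_SIZE];

	memset(rfds, 0, sizeof(rfds));
	init_rfd_ring(rfds);
	printf("last rfd: cmd=0x%04x, b_next wraps to %u\n",
	       rfds[RX_RING_SIZE - 1].cmd, rfds[RX_RING_SIZE - 1].b_next);
	return 0;
}
```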
rfd 641 drivers/net/ethernet/i825xx/sun3_82586.c volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr;
rfd 646 drivers/net/ethernet/i825xx/sun3_82586.c memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd));
rfd 647 drivers/net/ethernet/i825xx/sun3_82586.c p->rfd_first = rfd;
rfd 650 drivers/net/ethernet/i825xx/sun3_82586.c rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) );
rfd 651 drivers/net/ethernet/i825xx/sun3_82586.c rfd[i].rbd_offset = 0xffff;
rfd 653 drivers/net/ethernet/i825xx/sun3_82586.c rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */
rfd 655 drivers/net/ethernet/i825xx/sun3_82586.c ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) );
rfd 550 drivers/net/ethernet/intel/e100.c struct rfd blank_rfd;
rfd 1923 drivers/net/ethernet/intel/e100.c #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
rfd 1930 drivers/net/ethernet/intel/e100.c skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
rfd 1945 drivers/net/ethernet/intel/e100.c struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
rfd 1948 drivers/net/ethernet/intel/e100.c sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
rfd 1959 drivers/net/ethernet/intel/e100.c struct rfd *rfd = (struct rfd *)skb->data;
rfd 1968 drivers/net/ethernet/intel/e100.c sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
rfd 1969 drivers/net/ethernet/intel/e100.c rfd_status = le16_to_cpu(rfd->status);
rfd 1982 drivers/net/ethernet/intel/e100.c if ((le16_to_cpu(rfd->command) & cb_el) &&
rfd 1988 drivers/net/ethernet/intel/e100.c sizeof(struct rfd),
rfd 1996 drivers/net/ethernet/intel/e100.c actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
rfd 1997 drivers/net/ethernet/intel/e100.c if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
rfd 1998 drivers/net/ethernet/intel/e100.c actual_size = RFD_BUF_LEN - sizeof(struct rfd);
rfd 2010 drivers/net/ethernet/intel/e100.c if ((le16_to_cpu(rfd->command) & cb_el) &&
rfd 2018 drivers/net/ethernet/intel/e100.c skb_reserve(skb, sizeof(struct rfd));
rfd 2059 drivers/net/ethernet/intel/e100.c struct rfd *old_before_last_rfd, *new_before_last_rfd;
rfd 2080 drivers/net/ethernet/intel/e100.c old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;
rfd 2100 drivers/net/ethernet/intel/e100.c (struct rfd *)new_before_last_rx->skb->data;
rfd 2104 drivers/net/ethernet/intel/e100.c new_before_last_rx->dma_addr, sizeof(struct rfd),
rfd 2112 drivers/net/ethernet/intel/e100.c old_before_last_rx->dma_addr, sizeof(struct rfd),
rfd 2117 drivers/net/ethernet/intel/e100.c old_before_last_rx->dma_addr, sizeof(struct rfd),
rfd 2156 drivers/net/ethernet/intel/e100.c struct rfd *before_last;
rfd 2180 drivers/net/ethernet/intel/e100.c before_last = (struct rfd *)rx->skb->data;
rfd 2184 drivers/net/ethernet/intel/e100.c sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
rfd 2386 drivers/net/ethernet/intel/e100.c if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
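In the e100 lines above, the receive frame descriptor sits at the front of each RX buffer (RFD_BUF_LEN = sizeof(struct rfd) + frame space): the driver reads status and actual_size from the head of the buffer, masks the size to 14 bits, clamps it to the frame space, and skb_reserve()s past the descriptor. Below is a standalone sketch of that layout and clamp; the struct layout is a simplified stand-in for e100's real struct rfd, and the little-endian conversion is omitted.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for e100's RFD header; the real struct differs. */
struct rfd_sketch {
	uint16_t status;
	uint16_t command;
	uint32_t link;
	uint32_t rbd;
	uint16_t actual_size;   /* bytes the chip wrote, plus flag bits */
	uint16_t size;
};

#define FRAME_SPACE 1518
#define RFD_BUF_LEN (sizeof(struct rfd_sketch) + FRAME_SPACE)

/*
 * Mirrors the clamp in the lines above: the low 14 bits of actual_size are
 * the byte count, and it must never exceed the frame space that follows the
 * descriptor in the same buffer. (le16_to_cpu() conversion omitted here.)
 */
static size_t rx_frame_len(const unsigned char *buf)
{
	struct rfd_sketch rfd;
	size_t actual_size;

	memcpy(&rfd, buf, sizeof(rfd));               /* the RFD is at buf[0] */
	actual_size = rfd.actual_size & 0x3FFF;
	if (actual_size > RFD_BUF_LEN - sizeof(struct rfd_sketch))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd_sketch);
	return actual_size;               /* frame data starts at buf + sizeof(rfd) */
}

int main(void)
{
	unsigned char buf[RFD_BUF_LEN] = { 0 };
	struct rfd_sketch rfd = { .actual_size = 0x4040 };  /* flag bit + 64 bytes */

	memcpy(buf, &rfd, sizeof(rfd));
	printf("frame length = %zu\n", rx_frame_len(buf));   /* 64 */
	return 0;
}
```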
rfd 235 drivers/net/ethernet/qualcomm/emac/emac-mac.c #define EMAC_RFD(RXQ, SIZE, IDX) ((RXQ)->rfd.v_addr + (SIZE * (IDX)))
rfd 238 drivers/net/ethernet/qualcomm/emac/emac-mac.c #define GET_RFD_BUFFER(RXQ, IDX) (&((RXQ)->rfd.rfbuff[(IDX)]))
rfd 315 drivers/net/ethernet/qualcomm/emac/emac-mac.c writel(upper_32_bits(adpt->rx_q.rfd.dma_addr),
rfd 318 drivers/net/ethernet/qualcomm/emac/emac-mac.c writel(lower_32_bits(adpt->rx_q.rfd.dma_addr),
rfd 323 drivers/net/ethernet/qualcomm/emac/emac-mac.c writel(adpt->rx_q.rfd.count & RFD_RING_SIZE_BMSK,
rfd 629 drivers/net/ethernet/qualcomm/emac/emac-mac.c if (!rx_q->rfd.rfbuff)
rfd 632 drivers/net/ethernet/qualcomm/emac/emac-mac.c for (i = 0; i < rx_q->rfd.count; i++) {
rfd 646 drivers/net/ethernet/qualcomm/emac/emac-mac.c size = sizeof(struct emac_buffer) * rx_q->rfd.count;
rfd 647 drivers/net/ethernet/qualcomm/emac/emac-mac.c memset(rx_q->rfd.rfbuff, 0, size);
rfd 654 drivers/net/ethernet/qualcomm/emac/emac-mac.c memset(rx_q->rfd.v_addr, 0, rx_q->rfd.size);
rfd 655 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.produce_idx = 0;
rfd 656 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.consume_idx = 0;
rfd 703 drivers/net/ethernet/qualcomm/emac/emac-mac.c kfree(rx_q->rfd.rfbuff);
rfd 704 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.rfbuff = NULL;
rfd 706 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.v_addr = NULL;
rfd 707 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.dma_addr = 0;
rfd 708 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.size = 0;
rfd 723 drivers/net/ethernet/qualcomm/emac/emac-mac.c size = sizeof(struct emac_buffer) * rx_q->rfd.count;
rfd 724 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.rfbuff = kzalloc_node(size, GFP_KERNEL, node);
rfd 725 drivers/net/ethernet/qualcomm/emac/emac-mac.c if (!rx_q->rfd.rfbuff)
rfd 729 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.size = rx_q->rfd.count * (adpt->rfd_size * 4);
rfd 735 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.dma_addr = ring_header->dma_addr + ring_header->used;
rfd 736 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.v_addr = ring_header->v_addr + ring_header->used;
rfd 737 drivers/net/ethernet/qualcomm/emac/emac-mac.c ring_header->used += ALIGN(rx_q->rfd.size, 8);
rfd 742 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.produce_idx = 0;
rfd 743 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.consume_idx = 0;
rfd 760 drivers/net/ethernet/qualcomm/emac/emac-mac.c adpt->rx_q.rfd.count = adpt->rx_desc_cnt;
rfd 838 drivers/net/ethernet/qualcomm/emac/emac-mac.c adpt->rx_q.rfd.produce_idx = 0;
rfd 839 drivers/net/ethernet/qualcomm/emac/emac-mac.c adpt->rx_q.rfd.consume_idx = 0;
rfd 840 drivers/net/ethernet/qualcomm/emac/emac-mac.c for (i = 0; i < adpt->rx_q.rfd.count; i++)
rfd 841 drivers/net/ethernet/qualcomm/emac/emac-mac.c adpt->rx_q.rfd.rfbuff[i].dma_addr = 0;
rfd 849 drivers/net/ethernet/qualcomm/emac/emac-mac.c u32 *hw_rfd = EMAC_RFD(rx_q, adpt->rfd_size, rx_q->rfd.produce_idx);
rfd 854 drivers/net/ethernet/qualcomm/emac/emac-mac.c if (++rx_q->rfd.produce_idx == rx_q->rfd.count)
rfd 855 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.produce_idx = 0;
rfd 867 drivers/net/ethernet/qualcomm/emac/emac-mac.c next_produce_idx = rx_q->rfd.produce_idx + 1;
rfd 868 drivers/net/ethernet/qualcomm/emac/emac-mac.c if (next_produce_idx == rx_q->rfd.count)
rfd 871 drivers/net/ethernet/qualcomm/emac/emac-mac.c curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
rfd 897 drivers/net/ethernet/qualcomm/emac/emac-mac.c next_produce_idx = rx_q->rfd.produce_idx + 1;
rfd 898 drivers/net/ethernet/qualcomm/emac/emac-mac.c if (next_produce_idx == rx_q->rfd.count)
rfd 901 drivers/net/ethernet/qualcomm/emac/emac-mac.c curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
rfd 907 drivers/net/ethernet/qualcomm/emac/emac-mac.c u32 prod_idx = (rx_q->rfd.produce_idx << rx_q->produce_shift) &
rfd 1057 drivers/net/ethernet/qualcomm/emac/emac-mac.c struct emac_buffer *rfbuf = rx_q->rfd.rfbuff;
rfd 1063 drivers/net/ethernet/qualcomm/emac/emac-mac.c if (++consume_idx == rx_q->rfd.count)
rfd 1067 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.consume_idx = consume_idx;
rfd 1068 drivers/net/ethernet/qualcomm/emac/emac-mac.c rx_q->rfd.process_idx = consume_idx;
rfd 1159 drivers/net/ethernet/qualcomm/emac/emac-mac.c proc_idx = (rx_q->rfd.process_idx << rx_q->process_shft) &
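The emac lines above manage the RFD ring with explicit producer/consumer indices that wrap at rfd.count, and the producer index is shifted and masked when it is written to the hardware doorbell register. A tiny standalone sketch of that ring-index arithmetic follows; the count, shift, and mask values are placeholders, and the struct is a stand-in for emac_rfd_ring.

```c
#include <stdio.h>

#define RFD_COUNT      256U
#define PROD_IDX_SHIFT 0U        /* placeholder shift/mask for the doorbell */
#define PROD_IDX_MASK  0xFFFU

struct rfd_ring_sketch {          /* stand-in for struct emac_rfd_ring */
	unsigned int count;
	unsigned int produce_idx;
	unsigned int consume_idx;
};

/* "if (++produce_idx == count) produce_idx = 0;" from the lines above */
static void rfd_advance_produce(struct rfd_ring_sketch *r)
{
	if (++r->produce_idx == r->count)
		r->produce_idx = 0;
}

/* producer index as the hardware doorbell register would see it */
static unsigned int rfd_doorbell(const struct rfd_ring_sketch *r)
{
	return (r->produce_idx << PROD_IDX_SHIFT) & PROD_IDX_MASK;
}

int main(void)
{
	struct rfd_ring_sketch r = { .count = RFD_COUNT,
				     .produce_idx = RFD_COUNT - 1 };

	rfd_advance_produce(&r);                 /* wraps back to 0 */
	printf("produce_idx=%u doorbell=0x%x\n", r.produce_idx, rfd_doorbell(&r));
	return 0;
}
```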
rfd 175 drivers/net/ethernet/qualcomm/emac/emac-mac.h struct emac_rfd_ring rfd;
rfd 225 drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c unsigned int rfd, rfa;
rfd 240 drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c rfd = 0x03; /* Full-2.5K */
rfd 245 drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c rfd = 0x06; /* Full-4K */
rfd 250 drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c rfd = 0x06; /* Full-4K */
rfd 255 drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c rfd = 0x06; /* Full-4K */
rfd 261 drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
rfd 164 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c unsigned int rfd, rfa;
rfd 179 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c rfd = 0x03; /* Full-2.5K */
rfd 184 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c rfd = 0x06; /* Full-4K */
rfd 189 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c rfd = 0x06; /* Full-4K */
rfd 194 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c rfd = 0x06; /* Full-4K */
rfd 200 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c flow |= rfd << XGMAC_RFD_SHIFT;
rfd 116 include/net/9p/client.h int rfd;
rfd 52 net/9p/trans_fd.c int rfd;
rfd 723 net/9p/trans_fd.c if (clnt->trans_opts.fd.rfd != ~0)
rfd 724 net/9p/trans_fd.c seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd);
rfd 747 net/9p/trans_fd.c opts->rfd = ~0;
rfd 781 net/9p/trans_fd.c opts->rfd = option;
rfd 798 net/9p/trans_fd.c static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
rfd 805 net/9p/trans_fd.c ts->rd = fget(rfd);
rfd 1043 net/9p/trans_fd.c client->trans_opts.fd.rfd = opts.rfd;
rfd 1046 net/9p/trans_fd.c if (opts.rfd == ~0 || opts.wfd == ~0) {
rfd 1051 net/9p/trans_fd.c err = p9_fd_open(client, opts.rfd, opts.wfd);
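The trans_fd lines above show the 9p "fd" transport taking rfd and wfd mount options: both default to ~0, which the driver uses as a "not supplied" sentinel, and only when both are set does it hand them to p9_fd_open(), which fget()s the read descriptor. A standalone sketch of that sentinel check follows; the struct and helper names are illustrative stand-ins, not the driver's symbols.

```c
#include <stdio.h>

/* Stand-in for the fd-transport mount options seen in the lines above. */
struct p9_fd_opts_sketch {
	int rfd;    /* file descriptor to read 9p messages from */
	int wfd;    /* file descriptor to write 9p messages to */
};

/* "~0" is the sentinel for "option not given on the mount command line" */
static void opts_init(struct p9_fd_opts_sketch *opts)
{
	opts->rfd = ~0;
	opts->wfd = ~0;
}

/* both descriptors must be supplied before the transport can be opened */
static int opts_valid(const struct p9_fd_opts_sketch *opts)
{
	return opts->rfd != ~0 && opts->wfd != ~0;
}

int main(void)
{
	struct p9_fd_opts_sketch opts;

	opts_init(&opts);
	opts.rfd = 4;                    /* e.g. an rfd= option was parsed */
	printf("usable: %s\n", opts_valid(&opts) ? "yes" : "no");  /* no: wfd unset */
	opts.wfd = 5;
	printf("usable: %s\n", opts_valid(&opts) ? "yes" : "no");  /* yes */
	return 0;
}
```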