rxv               611 drivers/net/ethernet/ti/cpsw.c 	pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
rxv               780 drivers/net/ethernet/ti/cpsw.c 	ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
rxv               865 drivers/net/ethernet/ti/cpsw.c 		cpsw->rxv[i].budget = ch_budget;
rxv               870 drivers/net/ethernet/ti/cpsw.c 		cpsw->rxv[0].budget += budget;
rxv               960 drivers/net/ethernet/ti/cpsw.c 	struct cpsw_vector	*rxv;
rxv               968 drivers/net/ethernet/ti/cpsw.c 		rxv = &cpsw->rxv[ch];
rxv               969 drivers/net/ethernet/ti/cpsw.c 		if (unlikely(rxv->budget > budget - num_rx))
rxv               972 drivers/net/ethernet/ti/cpsw.c 			cur_budget = rxv->budget;
rxv               974 drivers/net/ethernet/ti/cpsw.c 		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
rxv               992 drivers/net/ethernet/ti/cpsw.c 	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
rxv              1363 drivers/net/ethernet/ti/cpsw.c 		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
rxv              1376 drivers/net/ethernet/ti/cpsw.c 			ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
rxv              2866 drivers/net/ethernet/ti/cpsw.c 	cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
rxv              2867 drivers/net/ethernet/ti/cpsw.c 	if (IS_ERR(cpsw->rxv[0].ch)) {
rxv              2869 drivers/net/ethernet/ti/cpsw.c 		ret = PTR_ERR(cpsw->rxv[0].ch);
rxv               285 drivers/net/ethernet/ti/cpsw_ethtool.c 		cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
rxv               542 drivers/net/ethernet/ti/cpsw_ethtool.c 		vec = cpsw->rxv;
rxv               340 drivers/net/ethernet/ti/cpsw_priv.h 	struct cpsw_vector		rxv[CPSW_MAX_QUEUES];
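All of the TI cpsw hits above revolve around one structure: the driver keeps an rxv[CPSW_MAX_QUEUES] array of struct cpsw_vector (the cpsw_priv.h hit), where each entry pairs a cpdma RX channel handle (ch) with that channel's share of the NAPI budget (budget). The budget is split across the channels up front, and the RX poll loop then walks rxv[], clamping each channel's share to whatever budget is still left before handing it to cpdma_chan_process(). What follows is a minimal standalone sketch of that budget-split loop under those assumptions; struct fake_chan, fake_chan_process() and the numbers in main() are invented stand-ins for the example, not the kernel's real cpdma API.

/*
 * Minimal standalone sketch of the per-channel budget split visible in
 * the cpsw.c hits above.  Only the rxv[]/budget/clamp shape is taken
 * from the listing; the channel type and helper below are stand-ins.
 */
#include <stdio.h>

#define CPSW_MAX_QUEUES 8

/* stand-in for struct cpdma_chan: just tracks packets waiting to be reaped */
struct fake_chan {
	int pending;
};

/* mirrors struct cpsw_vector from cpsw_priv.h: channel handle + budget share */
struct cpsw_vector {
	struct fake_chan *ch;
	int budget;
};

/* stand-in for cpdma_chan_process(): consume up to 'quota' packets */
static int fake_chan_process(struct fake_chan *ch, int quota)
{
	int done = ch->pending < quota ? ch->pending : quota;

	ch->pending -= done;
	return done;
}

/* same shape as the RX poll loop in the listing: clamp each share to what is left */
static int rx_poll(struct cpsw_vector *rxv, int ch_num, int budget)
{
	int num_rx = 0, cur_budget, ch;

	for (ch = 0; ch < ch_num; ch++) {
		if (rxv[ch].budget > budget - num_rx)
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv[ch].budget;

		num_rx += fake_chan_process(rxv[ch].ch, cur_budget);
		if (num_rx >= budget)
			break;
	}
	return num_rx;
}

int main(void)
{
	struct fake_chan chans[2] = { { .pending = 40 }, { .pending = 5 } };
	struct cpsw_vector rxv[CPSW_MAX_QUEUES] = {
		{ .ch = &chans[0], .budget = 32 },
		{ .ch = &chans[1], .budget = 32 },
	};

	printf("processed %d packets\n", rx_poll(rxv, 2, 64));
	return 0;
}

Run as-is the sketch reports 37 of the 64-packet budget consumed: channel 0 uses its full 32-packet share while channel 1 only has 5 packets pending; the clamp branch mirrors the unlikely() check in the listing that keeps a channel from spending budget another channel has already used.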
rxv                16 drivers/net/wireless/mediatek/mt76/mt7615/mac.c static inline s8 to_rssi(u32 field, u32 rxv)
rxv                18 drivers/net/wireless/mediatek/mt76/mt7615/mac.c 	return (FIELD_GET(field, rxv) - 220) / 2;
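The last two hits are unrelated to cpsw: in the MediaTek mt7615 driver, rxv is one 32-bit word of the hardware RX vector, and to_rssi() pulls an RSSI field out of it and converts it to dBm by subtracting 220 and halving, consistent with the usual RCPI-style encoding (value = 2 * dBm + 220). Below is a standalone sketch of the same arithmetic; field_get() is a simplified stand-in for the kernel's FIELD_GET() macro, and the mask and sample RXV word are made-up values for illustration.

/*
 * Standalone illustration of the to_rssi() conversion shown above for
 * mt7615.  Only the "subtract 220, then halve" step comes from the
 * listing; the mask and sample word are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

/* extract the bits selected by a (non-zero, contiguous) mask */
static uint32_t field_get(uint32_t mask, uint32_t word)
{
	return (word & mask) >> __builtin_ctz(mask);
}

/* same conversion as mt7615's to_rssi(): RCPI-style field value -> dBm */
static int to_rssi(uint32_t mask, uint32_t rxv)
{
	/* cast to int before subtracting so the sketch stays in signed math */
	return ((int)field_get(mask, rxv) - 220) / 2;
}

int main(void)
{
	uint32_t mask = 0x000000ff;	/* hypothetical RSSI field in bits 7:0 */
	uint32_t rxv  = 0x00000078;	/* field value 120 -> (120 - 220) / 2 = -50 */

	printf("rssi = %d dBm\n", to_rssi(mask, rxv));
	return 0;
}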