vmdq             3258 drivers/net/ethernet/intel/i40e/i40e_common.c 				p->vmdq = true;
vmdq            11965 drivers/net/ethernet/intel/i40e/i40e_main.c 	if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
vmdq              288 drivers/net/ethernet/intel/i40e/i40e_type.h 	bool vmdq;
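
The three i40e hits show the capability plumbing end to end: the admin-queue capability parser (i40e_common.c:3258) records that the device supports VMDq, i40e_type.h:288 declares the bool it lands in, and i40e_main.c:11965 only enables VMDq when the capability is present and there is more than one online CPU to spread pools across. A minimal sketch of that gate, using reduced stand-in types rather than the driver's real i40e_pf/i40e_hw (the flag bit and default pool count are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

struct hw_caps  { bool vmdq; };
struct pf_state {
    struct hw_caps func_caps;
    uint32_t flags;
    int num_vmdq_vsis;
};

#define FLAG_VMDQ_ENABLED (1u << 0)     /* illustrative flag bit */

static void vmdq_sw_init(struct pf_state *pf, int online_cpus)
{
    /* mirrors the i40e_main.c:11965 condition: capability reported
     * and more than one CPU online
     */
    if (pf->func_caps.vmdq && online_cpus != 1) {
        pf->flags |= FLAG_VMDQ_ENABLED;
        pf->num_vmdq_vsis = 8;          /* illustrative default */
    }
}
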
vmdq              784 drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
vmdq              797 drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c 	rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
vmdq              808 drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
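
On the 82598 (ixgbe_82598.c:784/808) the pool a receive address steers to is encoded directly in the receive-address-high (RAH) register's VIND field; line 797 above is the insertion of the pool number into that field. A standalone model of the update, with shift/mask values mirroring the driver's IXGBE_RAH_VIND_SHIFT/IXGBE_RAH_VIND_MASK definitions:

#include <stdint.h>

#define RAH_VIND_SHIFT  18
#define RAH_VIND_MASK   0x003C0000u     /* 4-bit pool index field */

/* clear the previous pool, then insert the new one, as
 * ixgbe_set_vmdq_82598 does on the value read back from RAH
 */
static uint32_t rah_set_vind(uint32_t rar_high, uint32_t vmdq)
{
    rar_high &= ~RAH_VIND_MASK;
    rar_high |= (vmdq << RAH_VIND_SHIFT) & RAH_VIND_MASK;
    return rar_high;
}
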
vmdq             1839 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
vmdq             1852 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 	hw->mac.ops.set_vmdq(hw, index, vmdq);
vmdq             2958 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
vmdq             2978 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
vmdq             2987 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 	} else if (vmdq < 32) {
vmdq             2988 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 		mpsar_lo &= ~BIT(vmdq);
vmdq             2991 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 		mpsar_hi &= ~BIT(vmdq - 32);
vmdq             3009 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
vmdq             3020 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 	if (vmdq < 32) {
vmdq             3022 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 		mpsar |= BIT(vmdq);
vmdq             3026 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 		mpsar |= BIT(vmdq - 32);
vmdq             3042 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
vmdq             3046 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 	if (vmdq < 32) {
vmdq             3047 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
vmdq             3051 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c 		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
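
From 82599 on, the generic code takes over: ixgbe_set_rar_generic (line 1839) programs the address and delegates pool membership to the set_vmdq hook (line 1852), while ixgbe_set_vmdq_generic/ixgbe_clear_vmdq_generic keep a 64-bit pool bitmap per receive address, split across the MPSAR_LO/MPSAR_HI register pair: pools 0-31 as BIT(vmdq) in the low word, pools 32-63 as BIT(vmdq - 32) in the high word. The SAN MAC variant (line 3042) uses the same split but writes a single-pool bitmap outright instead of OR-ing into the existing one. A runnable model of the split:

#include <assert.h>
#include <stdint.h>

/* stand-in for one RAR entry's MPSAR_LO/MPSAR_HI pair */
struct mpsar { uint32_t lo, hi; };

static void mpsar_set_pool(struct mpsar *m, uint32_t vmdq)
{
    if (vmdq < 32)
        m->lo |= 1u << vmdq;
    else
        m->hi |= 1u << (vmdq - 32);
}

static void mpsar_clear_pool(struct mpsar *m, uint32_t vmdq)
{
    if (vmdq < 32)
        m->lo &= ~(1u << vmdq);
    else
        m->hi &= ~(1u << (vmdq - 32));
}

int main(void)
{
    struct mpsar m = { 0, 0 };

    mpsar_set_pool(&m, 5);      /* pool 5  -> MPSAR_LO bit 5 */
    mpsar_set_pool(&m, 40);     /* pool 40 -> MPSAR_HI bit 8 */
    assert(m.lo == 1u << 5 && m.hi == 1u << 8);

    mpsar_clear_pool(&m, 5);    /* address no longer steered to pool 5 */
    assert(m.lo == 0);
    return 0;
}
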
vmdq               48 drivers/net/ethernet/intel/ixgbe/ixgbe_common.h s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
vmdq               67 drivers/net/ethernet/intel/ixgbe/ixgbe_common.h s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
vmdq               68 drivers/net/ethernet/intel/ixgbe/ixgbe_common.h s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
vmdq               69 drivers/net/ethernet/intel/ixgbe/ixgbe_common.h s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
vmdq               22 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
vmdq               36 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
vmdq               39 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		if ((reg_idx & ~vmdq->mask) >= tcs) {
vmdq               41 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
vmdq               47 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
vmdq               50 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		if ((reg_idx & ~vmdq->mask) >= tcs)
vmdq               51 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
vmdq               66 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
vmdq               69 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
vmdq               71 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
vmdq               77 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
vmdq               79 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
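
In the ixgbe_lib.c hits, the recurring __ALIGN_MASK(1, ~vmdq->mask) is the queue stride of one VMDq pool: vmdq->mask covers the pool bits of a hardware queue index, so rounding 1 up to ~mask yields the per-pool queue count. vmdq->offset * stride is then the first register index of the PF's pool range, and (vmdq->offset + vmdq->indices) * stride (lines 69/77) is the first queue past the whole VMDq region, where the FCoE rings are placed. A worked check of that arithmetic, assuming a 4-queues-per-pool mask of 0x7C (the 82599-style value) and an example pool layout:

#include <stdint.h>
#include <stdio.h>

/* the kernel's mask-based round-up */
#define __ALIGN_MASK(x, mask)   (((x) + (mask)) & ~(mask))

int main(void)
{
    uint32_t mask = 0x7C;               /* 4 queues per pool */
    uint32_t stride = __ALIGN_MASK(1u, ~mask);
    uint32_t offset = 8, indices = 24;  /* example pool layout */

    printf("queues per pool:    %u\n", stride);             /* 4   */
    printf("first PF reg_idx:   %u\n", offset * stride);    /* 32  */
    printf("end of VMDq region: %u\n",
           (offset + indices) * stride);                    /* 128 */
    return 0;
}
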
vmdq              187 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
vmdq              198 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
vmdq              206 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 		if ((reg_idx & ~vmdq->mask) >= rss->indices) {
vmdq              208 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
vmdq              222 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
vmdq              231 drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c 			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
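
The ring-to-register mapping loops (lines 198-231 above, and likewise the DCB variant at lines 39-51) use reg_idx & ~vmdq->mask to extract the within-pool queue index; once it reaches the per-pool ring count (rss->indices, or tcs for DCB), __ALIGN_MASK(reg_idx, ~vmdq->mask) jumps reg_idx ahead to the next pool boundary. A standalone trace of that skip, again with the 0x7C mask and two RSS rings per pool:

#include <stdint.h>
#include <stdio.h>

#define __ALIGN_MASK(x, mask)   (((x) + (mask)) & ~(mask))

int main(void)
{
    uint32_t mask = 0x7C;       /* 4 hardware queues per pool */
    uint32_t rss_indices = 2;   /* rings actually used per pool */
    uint32_t reg_idx = 0;       /* vmdq->offset == 0 here */
    int ring;

    for (ring = 0; ring < 6; ring++, reg_idx++) {
        /* past the used rings of this pool? jump to the next
         * pool boundary, as ixgbe_lib.c:206-208 does
         */
        if ((reg_idx & ~mask) >= rss_indices)
            reg_idx = __ALIGN_MASK(reg_idx, ~mask);
        printf("ring %d -> reg_idx %u\n", ring, reg_idx);
    }
    return 0;                   /* maps rings to 0,1,4,5,8,9 */
}
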
vmdq             7560 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
vmdq             7561 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
vmdq             9280 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
vmdq             9289 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c 			*queue = vf * __ALIGN_MASK(1, ~vmdq->mask);
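
ixgbe_main.c uses the same stride in the other direction: multiplying a pool/VF number by __ALIGN_MASK(1, ~vmdq->mask) (line 9289) yields that pool's first absolute queue index:

#include <stdint.h>

#define __ALIGN_MASK(x, mask)   (((x) + (mask)) & ~(mask))

/* pool/VF number -> first absolute queue index of that pool */
static uint32_t first_queue_of_pool(uint32_t vf, uint32_t mask)
{
    return vf * __ALIGN_MASK(1u, ~mask);    /* VF 3, mask 0x7C -> 12 */
}
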
vmdq              697 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
vmdq              699 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
vmdq              808 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
vmdq              809 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
vmdq              827 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
vmdq              833 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
vmdq             1034 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
vmdq             1051 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
vmdq             1052 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
vmdq             1517 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
vmdq             1556 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c 	queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
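
The ixgbe_sriov.c hits are all consumers of that per-pool stride: per-VF queue walks at lines 699/809/833 and 1556, and at lines 1051-1052 the PF answers a VF's queue-count query by advertising __ALIGN_MASK(1, ~vmdq->mask) as both its TX and RX queue counts. A reduced model of filling that reply (the word indices below are illustrative stand-ins for the driver's IXGBE_VF_TX_QUEUES/IXGBE_VF_RX_QUEUES slots):

#include <stdint.h>

#define __ALIGN_MASK(x, mask)   (((x) + (mask)) & ~(mask))

#define VF_TX_QUEUES    1       /* illustrative mailbox word index */
#define VF_RX_QUEUES    2       /* illustrative mailbox word index */

static void fill_get_queues_reply(uint32_t *msgbuf, uint32_t vmdq_mask)
{
    uint32_t q_per_pool = __ALIGN_MASK(1u, ~vmdq_mask);

    /* one pool's worth of queues in each direction */
    msgbuf[VF_TX_QUEUES] = q_per_pool;
    msgbuf[VF_RX_QUEUES] = q_per_pool;
}
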
vmdq             3388 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h 				  u32 *vmdq);
vmdq              427 drivers/net/ethernet/intel/ixgbevf/vf.c 			      u32 vmdq)
vmdq              463 drivers/net/ethernet/intel/ixgbevf/vf.c 				 u32 vmdq)
vmdq               21 drivers/net/ethernet/intel/ixgbevf/vf.h 				  u32 *vmdq);
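
Finally, the ixgbevf hits (vf.c:427/463, plus the wrapped prototypes whose continuation lines show at vf.h:21 and ixgbe_type.h:3388) illustrate why a vmdq parameter survives on the VF side at all: PF and VF drivers keep compatible mac-ops signatures, so the VF's address helpers still accept a pool argument even though pool placement is decided by the PF over the mailbox. A reduced illustration of that shared-signature pattern (types and names are stand-ins, not the drivers' structs):

#include <stdint.h>
#include <stdio.h>

struct hw;      /* opaque device handle */

/* one ops signature shared by PF and VF implementations */
struct mac_ops {
    int (*set_rar)(struct hw *hw, uint32_t index,
                   const uint8_t *addr, uint32_t vmdq);
};

static int pf_set_rar(struct hw *hw, uint32_t index,
                      const uint8_t *addr, uint32_t vmdq)
{
    (void)hw; (void)addr;
    /* PF side: would program RAR/RAH plus the MPSAR pool bitmap */
    printf("PF: RAR[%u] -> pool %u\n", index, vmdq);
    return 0;
}

static int vf_set_rar(struct hw *hw, uint32_t index,
                      const uint8_t *addr, uint32_t vmdq)
{
    (void)hw; (void)addr; (void)vmdq;
    /* VF side: vmdq kept only for signature compatibility; the
     * request goes to the PF, which chooses the pool
     */
    printf("VF: mailbox set-MAC for RAR[%u]\n", index);
    return 0;
}

int main(void)
{
    uint8_t mac[6] = { 0 };
    struct mac_ops pf = { pf_set_rar }, vf = { vf_set_rar };

    pf.set_rar(NULL, 0, mac, 5);
    vf.set_rar(NULL, 0, mac, 5);
    return 0;
}
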