npgs   27 drivers/net/ethernet/intel/i40e/i40e_xsk.c            for (i = 0; i < umem->npgs; i++) {
npgs   61 drivers/net/ethernet/intel/i40e/i40e_xsk.c            for (i = 0; i < umem->npgs; i++) {
npgs   30 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c          for (i = 0; i < umem->npgs; i++) {
npgs   57 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c          for (i = 0; i < umem->npgs; i++) {
npgs   15 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c for (i = 0; i < umem->npgs; i++) {
npgs   42 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c for (i = 0; i < umem->npgs; i++) {
npgs   57 include/net/xdp_sock.h                                u32 npgs;
npgs  186 net/xdp/xdp_umem.c                                    for (i = 0; i < umem->npgs; i++)
npgs  196 net/xdp/xdp_umem.c                                    for (i = 0; i < umem->npgs; i++) {
npgs  215 net/xdp/xdp_umem.c                                    put_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
npgs  224 net/xdp/xdp_umem.c                                    atomic_long_sub(umem->npgs, &umem->user->locked_vm);
npgs  285 net/xdp/xdp_umem.c                                    long npgs;
npgs  288 net/xdp/xdp_umem.c                                    umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
npgs  294 net/xdp/xdp_umem.c                                    npgs = get_user_pages(umem->address, umem->npgs,
npgs  298 net/xdp/xdp_umem.c                                    if (npgs != umem->npgs) {
npgs  299 net/xdp/xdp_umem.c                                    if (npgs >= 0) {
npgs  300 net/xdp/xdp_umem.c                                    umem->npgs = npgs;
npgs  304 net/xdp/xdp_umem.c                                    err = npgs;
npgs  329 net/xdp/xdp_umem.c                                    new_npgs = old_npgs + umem->npgs;
npgs  344 net/xdp/xdp_umem.c                                    u64 npgs, addr = mr->addr, size = mr->len;
npgs  375 net/xdp/xdp_umem.c                                    npgs = div_u64(size, PAGE_SIZE);
npgs  376 net/xdp/xdp_umem.c                                    if (npgs > U32_MAX)
npgs  398 net/xdp/xdp_umem.c                                    umem->npgs = (u32)npgs;
npgs  415 net/xdp/xdp_umem.c                                    umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
npgs  602 net/xdp/xsk.c                                         for (i = 0; i < umem->npgs - 1; i++) {
npgs   58 net/xdp/xsk_diag.c                                    du.num_pages = umem->npgs;
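
The entries above are all in the AF_XDP UMEM code: include/net/xdp_sock.h declares npgs as a u32, and net/xdp/xdp_umem.c derives it by dividing the registered region's length by PAGE_SIZE, rejecting counts that do not fit in the u32 field, before pinning that many pages with get_user_pages() and looping over them in the drivers. A minimal userspace sketch of that page-count bookkeeping follows; it is illustrative only, and the names sketch_umem and sketch_umem_reg are hypothetical, not kernel APIs.

/*
 * Userspace sketch of the npgs bookkeeping visible in the listing:
 * divide the UMEM length by the page size, refuse counts that would
 * overflow the u32 npgs field, and record the result for later
 * per-page loops. Not kernel code; names are invented for the sketch.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct sketch_umem {
	uint64_t address;	/* start of the user memory region */
	uint32_t npgs;		/* number of pages, a u32 as in xdp_sock.h */
};

static int sketch_umem_reg(struct sketch_umem *umem, uint64_t addr, uint64_t size)
{
	long page_size = sysconf(_SC_PAGESIZE);
	uint64_t npgs = size / (uint64_t)page_size;

	/* mirrors the U32_MAX check before umem->npgs = (u32)npgs */
	if (npgs > UINT32_MAX)
		return -1;

	umem->address = addr;
	umem->npgs = (uint32_t)npgs;
	return 0;
}

int main(void)
{
	struct sketch_umem umem;

	if (sketch_umem_reg(&umem, 0x10000, 16 * 4096) == 0)
		printf("npgs = %u\n", umem.npgs);	/* 16 on 4 KiB pages */
	return 0;
}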