num_pg 301 drivers/gpu/drm/omapdrm/tcm.h static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg)
num_pg 303 drivers/gpu/drm/omapdrm/tcm.h if (__tcm_sizeof(a) < num_pg)
num_pg 305 drivers/gpu/drm/omapdrm/tcm.h if (!num_pg)
num_pg 308 drivers/gpu/drm/omapdrm/tcm.h a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;
num_pg 309 drivers/gpu/drm/omapdrm/tcm.h a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);
num_pg 897 drivers/lightnvm/core.c ppa.g.pg = geo->num_pg - 1;
num_pg 926 drivers/lightnvm/core.c for (pg = 0; pg < geo->num_pg; pg++) {
num_pg 1246 drivers/lightnvm/pblk.h ppa->g.pg < geo->num_pg &&
num_pg 1349 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c hdev->tm_info.num_pg = 1;
num_pg 352 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h u8 num_pg; /* It must be 1 if vNET-Base schd */
num_pg 620 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c for (i = 0; i < hdev->tm_info.num_pg; i++) {
num_pg 676 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c for (i = 0; i < hdev->tm_info.num_pg; i++) {
num_pg 699 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c for (i = 0; i < hdev->tm_info.num_pg; i++) {
num_pg 740 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c for (i = 0; i < hdev->tm_info.num_pg; i++) {
num_pg 1124 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c for (i = 0; i < hdev->tm_info.num_pg; i++) {
num_pg 1434 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c hdev->tm_info.num_pg != 1)
num_pg 117 drivers/nvme/host/lightnvm.c __le16 num_pg;
num_pg 317 drivers/nvme/host/lightnvm.c pg_per_blk = le16_to_cpu(src->num_pg);
num_pg 367 drivers/nvme/host/lightnvm.c geo->num_pg = le16_to_cpu(src->num_pg);
num_pg 1063 drivers/nvme/host/lightnvm.c return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);
num_pg 395 include/linux/lightnvm.h u16 num_pg;
num_pg 612 include/linux/lightnvm.h if (pg == geo->num_pg) {
num_pg 479 include/linux/skbuff.h unsigned int num_pg;
num_pg 1057 net/core/skbuff.c unsigned long max_pg, num_pg, new_pg, old_pg;
num_pg 1063 net/core/skbuff.c num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */
num_pg 1069 net/core/skbuff.c new_pg = old_pg + num_pg;
num_pg 1077 net/core/skbuff.c mmp->num_pg = num_pg;
num_pg 1079 net/core/skbuff.c mmp->num_pg += num_pg;
num_pg 1089 net/core/skbuff.c atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
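The drivers/gpu/drm/omapdrm/tcm.h hits above (lines 301-309) together make up tcm_1d_limit(), which shrinks a one-dimensional container-manager area to exactly num_pg pages by recomputing its end point from the start point and the container width. The sketch below reproduces that arithmetic in isolation; the struct layouts, the __tcm_sizeof() helper and the two error return values are simplified assumptions for illustration, not the real omapdrm definitions.

/*
 * Minimal user-space sketch of the end-point arithmetic visible in the
 * tcm.h hits above. Struct layouts, __tcm_sizeof() and the error codes
 * are assumptions, not the actual omapdrm code.
 */
#include <errno.h>

struct tcm { unsigned int width, height; };   /* page container */

struct tcm_pt { unsigned int x, y; };         /* page coordinate */

struct tcm_area {
	struct tcm *tcm;
	struct tcm_pt p0;                     /* first page of the area */
	struct tcm_pt p1;                     /* last page of the area */
};

/* pages spanned by a 1D (row-major) area, inclusive of both end points */
static unsigned int __tcm_sizeof(struct tcm_area *a)
{
	return (a->p1.y - a->p0.y) * a->tcm->width + a->p1.x - a->p0.x + 1;
}

/* shrink a 1D area so that it covers exactly num_pg pages */
static int tcm_1d_limit(struct tcm_area *a, unsigned int num_pg)
{
	if (__tcm_sizeof(a) < num_pg)
		return -ENOMEM;               /* assumed error code */
	if (!num_pg)
		return -EINVAL;               /* assumed error code */

	/* end column wraps modulo the container width ... */
	a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;
	/* ... and every full width crossed advances the end row */
	a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);
	return 0;
}

For a 16-page-wide container and an area starting at (x=14, y=0), limiting to 5 pages gives an end point of (x=2, y=1), so __tcm_sizeof() reports 5 again; that invariant is what the modulo/divide pair at tcm.h lines 308-309 maintains.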