header_cache   115 drivers/infiniband/hw/qib/qib_user_sdma.c  struct dma_pool *header_cache;
header_cache   214 drivers/infiniband/hw/qib/qib_user_sdma.c  pq->header_cache = dma_pool_create(pq->header_cache_name,
header_cache   218 drivers/infiniband/hw/qib/qib_user_sdma.c  if (!pq->header_cache)
header_cache   243 drivers/infiniband/hw/qib/qib_user_sdma.c  dma_pool_destroy(pq->header_cache);
header_cache   279 drivers/infiniband/hw/qib/qib_user_sdma.c  hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
header_cache   648 drivers/infiniband/hw/qib/qib_user_sdma.c  dma_pool_free(pq->header_cache,
header_cache  1031 drivers/infiniband/hw/qib/qib_user_sdma.c  dma_pool_free(pq->header_cache, pbc, dma_addr);
header_cache  1106 drivers/infiniband/hw/qib/qib_user_sdma.c  dma_pool_destroy(pq->header_cache);
header_cache    82 drivers/mtd/rfd_ftl.c                      u16 *header_cache; /* cached header */
header_cache   100 drivers/mtd/rfd_ftl.c                      if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
header_cache   110 drivers/mtd/rfd_ftl.c                      entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
header_cache   184 drivers/mtd/rfd_ftl.c                      part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
header_cache   185 drivers/mtd/rfd_ftl.c                      if (!part->header_cache)
header_cache   207 drivers/mtd/rfd_ftl.c                      (u_char *)part->header_cache);
header_cache   237 drivers/mtd/rfd_ftl.c                      kfree(part->header_cache);
header_cache   530 drivers/mtd/rfd_ftl.c                      (u_char *)part->header_cache);
header_cache   573 drivers/mtd/rfd_ftl.c                      part->header_cache[offset + HEADER_MAP_OFFSET] = del;
header_cache   592 drivers/mtd/rfd_ftl.c                      if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
header_cache   649 drivers/mtd/rfd_ftl.c                      part->header_cache[i + HEADER_MAP_OFFSET] = entry;
header_cache   779 drivers/mtd/rfd_ftl.c                      kfree(part->header_cache);
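The qib_user_sdma.c hits above trace the full dma_pool lifecycle: dma_pool_create at setup (line 214), dma_pool_alloc per header (279), dma_pool_free on completion or error (648, 1031), and dma_pool_destroy on teardown (243, 1106). A minimal sketch of that pattern follows; struct example_queue, HDR_SIZE, the pool name, and the function names are illustrative assumptions, not the driver's actual definitions.

    /*
     * Sketch of the dma_pool lifecycle seen in qib_user_sdma.c.
     * All names and sizes here are placeholders for illustration.
     */
    #include <linux/dmapool.h>
    #include <linux/device.h>
    #include <linux/errno.h>

    #define HDR_SIZE 64   /* assumed header size */

    struct example_queue {
            struct dma_pool *header_cache;
    };

    static int example_init(struct example_queue *pq, struct device *dev)
    {
            /* cf. line 214: pool of small DMA-coherent headers */
            pq->header_cache = dma_pool_create("hdr-cache", dev,
                                               HDR_SIZE, 4, 0);
            if (!pq->header_cache)          /* cf. line 218 */
                    return -ENOMEM;
            return 0;
    }

    static void *example_get_hdr(struct example_queue *pq, dma_addr_t *dma)
    {
            /* cf. line 279: *dma receives the header's bus address */
            return dma_pool_alloc(pq->header_cache, GFP_KERNEL, dma);
    }

    static void example_put_hdr(struct example_queue *pq, void *hdr,
                                dma_addr_t dma)
    {
            /* cf. lines 648/1031: return the header to the pool */
            dma_pool_free(pq->header_cache, hdr, dma);
    }

    static void example_fini(struct example_queue *pq)
    {
            /* cf. lines 243/1106: every allocation must be freed first */
            dma_pool_destroy(pq->header_cache);
    }

A dma_pool fits here because the headers are small, fixed-size, and must be DMA-coherent; allocating each one with dma_alloc_coherent would waste a page apiece.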
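The rfd_ftl.c hits show a different pattern: header_cache is a plain kmalloc'd array of little-endian u16 words (lines 184-185, freed at 237/779), validated by a magic word at index 0 (line 100), with the sector map read and written at HEADER_MAP_OFFSET (lines 110, 573, 592, 649). A sketch under those assumptions; struct example_part, the function names, and the two constant values below are placeholders, with the real RFD_MAGIC and HEADER_MAP_OFFSET defined in rfd_ftl.c.

    /*
     * Sketch of the cached-header pattern seen in rfd_ftl.c.
     * Constant values and names are illustrative placeholders.
     */
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    #define EX_RFD_MAGIC          0x9193  /* placeholder magic word */
    #define EX_HEADER_MAP_OFFSET  3       /* placeholder map offset */

    struct example_part {
            u16 *header_cache;    /* cached copy of the block header */
            int header_size;      /* bytes */
    };

    static int example_alloc(struct example_part *part)
    {
            /* cf. lines 184-185: the cache is ordinary kernel memory */
            part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
            if (!part->header_cache)
                    return -ENOMEM;
            return 0;
    }

    static int example_scan(struct example_part *part)
    {
            int i, nr_entries;

            /* cf. line 100: header is stored little-endian on flash,
             * so every read goes through le16_to_cpu() */
            if (le16_to_cpu(part->header_cache[0]) != EX_RFD_MAGIC)
                    return -ENOENT;

            /* cf. lines 110/592: map entries follow the header words */
            nr_entries = part->header_size / 2 - EX_HEADER_MAP_OFFSET;
            for (i = 0; i < nr_entries; i++) {
                    u16 entry = le16_to_cpu(
                            part->header_cache[EX_HEADER_MAP_OFFSET + i]);
                    (void)entry;  /* ... interpret the map entry ... */
            }
            return 0;
    }

    static void example_release(struct example_part *part)
    {
            /* cf. lines 237/779 */
            kfree(part->header_cache);
    }

Caching the on-flash header in RAM this way lets the FTL consult and update the sector map without rereading the MTD device, at the cost of keeping the cache and the flash copy in sync (the writes at lines 573 and 649 are followed by write-backs such as the ones at lines 207 and 530).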