Searched refs:blp (Results 1 - 8 of 8) sorted by relevance

/linux-4.1.27/fs/xfs/libxfs/
xfs_dir2_block.c
172 struct xfs_dir2_leaf_entry *blp, xfs_dir2_block_need_space()
205 tagp = (__be16 *)blp - 1; xfs_dir2_block_need_space()
216 (uint)sizeof(*blp) < len) xfs_dir2_block_need_space()
218 } else if ((be32_to_cpu(btp->stale) - 1) * (uint)sizeof(*blp) < len) xfs_dir2_block_need_space()
221 dup = (xfs_dir2_data_unused_t *)blp; xfs_dir2_block_need_space()
229 tagp = (__be16 *)blp - 1; xfs_dir2_block_need_space()
257 if (be16_to_cpu(dup->length) < len + (uint)sizeof(*blp)) { xfs_dir2_block_need_space()
285 struct xfs_dir2_leaf_entry *blp, xfs_dir2_block_compact()
298 if (blp[fromidx].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) { xfs_dir2_block_compact()
308 blp[toidx] = blp[fromidx]; xfs_dir2_block_compact()
315 (xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr), xfs_dir2_block_compact()
316 (xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)), xfs_dir2_block_compact()
335 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dir2_block_addname() local
375 blp = xfs_dir2_block_leaf_p(btp); xfs_dir2_block_addname()
381 xfs_dir2_block_need_space(dp, hdr, btp, blp, &tagp, &dup, xfs_dir2_block_addname()
417 xfs_dir2_block_compact(args, bp, hdr, btp, blp, &needlog, xfs_dir2_block_addname()
419 /* recalculate blp post-compaction */ xfs_dir2_block_addname()
420 blp = xfs_dir2_block_leaf_p(btp); xfs_dir2_block_addname()
435 if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval) xfs_dir2_block_addname()
442 while (mid >= 0 && be32_to_cpu(blp[mid].hashval) >= args->hashval) { xfs_dir2_block_addname()
455 sizeof(*blp)), xfs_dir2_block_addname()
456 (xfs_dir2_data_aoff_t)sizeof(*blp), xfs_dir2_block_addname()
475 blp--; xfs_dir2_block_addname()
478 memmove(blp, &blp[1], mid * sizeof(*blp)); xfs_dir2_block_addname()
488 blp[lowstale].address != xfs_dir2_block_addname()
494 blp[highstale].address != xfs_dir2_block_addname()
506 memmove(&blp[lowstale], &blp[lowstale + 1], xfs_dir2_block_addname()
507 (mid - lowstale) * sizeof(*blp)); xfs_dir2_block_addname()
518 memmove(&blp[mid + 1], &blp[mid], xfs_dir2_block_addname()
519 (highstale - mid) * sizeof(*blp)); xfs_dir2_block_addname()
532 blp[mid].hashval = cpu_to_be32(args->hashval); xfs_dir2_block_addname()
533 blp[mid].address = cpu_to_be32(xfs_dir2_byte_to_dataptr( xfs_dir2_block_addname()
575 xfs_dir2_leaf_entry_t *blp; xfs_dir2_block_log_leaf() local
579 blp = xfs_dir2_block_leaf_p(btp); xfs_dir2_block_log_leaf()
580 xfs_trans_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)hdr), xfs_dir2_block_log_leaf()
581 (uint)((char *)&blp[last + 1] - (char *)hdr - 1)); xfs_dir2_block_log_leaf()
609 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dir2_block_lookup() local
629 blp = xfs_dir2_block_leaf_p(btp); xfs_dir2_block_lookup()
635 be32_to_cpu(blp[ent].address))); xfs_dir2_block_lookup()
657 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dir2_block_lookup_int() local
682 blp = xfs_dir2_block_leaf_p(btp); xfs_dir2_block_lookup_int()
690 if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval) xfs_dir2_block_lookup_int()
705 while (mid > 0 && be32_to_cpu(blp[mid - 1].hashval) == args->hashval) { xfs_dir2_block_lookup_int()
713 if ((addr = be32_to_cpu(blp[mid].address)) == XFS_DIR2_NULL_DATAPTR) xfs_dir2_block_lookup_int()
734 be32_to_cpu(blp[mid].hashval) == hash); xfs_dir2_block_lookup_int()
759 xfs_dir2_leaf_entry_t *blp; /* block leaf pointer */ xfs_dir2_block_removename() local
785 blp = xfs_dir2_block_leaf_p(btp); xfs_dir2_block_removename()
791 be32_to_cpu(blp[ent].address))); xfs_dir2_block_removename()
807 blp[ent].address = cpu_to_be32(XFS_DIR2_NULL_DATAPTR); xfs_dir2_block_removename()
839 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dir2_block_replace() local
859 blp = xfs_dir2_block_leaf_p(btp); xfs_dir2_block_replace()
865 be32_to_cpu(blp[ent].address))); xfs_dir2_block_replace()
993 * Use up the space at the end of the block (blp/btp). xfs_dir2_leaf_to_block()
1048 xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dir2_sf_to_block() local
1143 blp = xfs_dir2_block_leaf_p(btp); xfs_dir2_sf_to_block()
1144 endoffset = (uint)((char *)blp - (char *)hdr); xfs_dir2_sf_to_block()
1162 blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot); xfs_dir2_sf_to_block()
1163 blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr( xfs_dir2_sf_to_block()
1176 blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot); xfs_dir2_sf_to_block()
1177 blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr( xfs_dir2_sf_to_block()
1229 blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops-> xfs_dir2_sf_to_block()
1231 blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr( xfs_dir2_sf_to_block()
1244 xfs_sort(blp, be32_to_cpu(btp->count), sizeof(*blp), xfs_dir2_block_sort); xfs_dir2_sf_to_block()
168 xfs_dir2_block_need_space( struct xfs_inode *dp, struct xfs_dir2_data_hdr *hdr, struct xfs_dir2_block_tail *btp, struct xfs_dir2_leaf_entry *blp, __be16 **tagpp, struct xfs_dir2_data_unused **dupp, struct xfs_dir2_data_unused **enddupp, int *compact, int len) xfs_dir2_block_need_space() argument
280 xfs_dir2_block_compact( struct xfs_da_args *args, struct xfs_buf *bp, struct xfs_dir2_data_hdr *hdr, struct xfs_dir2_block_tail *btp, struct xfs_dir2_leaf_entry *blp, int *needlog, int *lfloghigh, int *lfloglow) xfs_dir2_block_compact() argument
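The recurring pattern in this file -- blp = xfs_dir2_block_leaf_p(btp) followed by a binary search on blp[mid].hashval -- reflects the v2 block-directory layout: the tail record sits at the very end of the data block and the leaf-entry array sits immediately before it. The standalone sketch below illustrates that pointer math and the lookup loop; the struct and function names are simplified stand-ins (host-endian fields instead of the on-disk __be32 values), not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the on-disk structures; the real kernel fields
 * are big-endian (__be32) and accessed through be32_to_cpu(). */
struct leaf_entry {                  /* roughly xfs_dir2_leaf_entry */
	uint32_t hashval;            /* hash of the entry name */
	uint32_t address;            /* packed offset of the data entry */
};

struct block_tail {                  /* roughly xfs_dir2_block_tail */
	uint32_t count;              /* leaf entries, including stale ones */
	uint32_t stale;              /* stale (freed) leaf entries */
};

#define NULL_DATAPTR 0xffffffffu     /* stands in for XFS_DIR2_NULL_DATAPTR */

/* The leaf array ends where the tail begins, and the tail is the last thing
 * in the block: the same pointer math as xfs_dir2_block_leaf_p(). */
static struct leaf_entry *block_leaf_p(struct block_tail *btp)
{
	return (struct leaf_entry *)btp - btp->count;
}

/* Binary search on hashval, back up to the first duplicate, then scan
 * forward past stale slots -- the shape of xfs_dir2_block_lookup_int(). */
static int block_lookup(struct block_tail *btp, uint32_t want)
{
	struct leaf_entry *blp = block_leaf_p(btp);
	int low = 0, high = (int)btp->count - 1, mid = 0, found = 0;

	while (low <= high) {
		mid = (low + high) >> 1;
		if (blp[mid].hashval == want) {
			found = 1;
			break;
		}
		if (blp[mid].hashval < want)
			low = mid + 1;
		else
			high = mid - 1;
	}
	if (!found)
		return -1;
	while (mid > 0 && blp[mid - 1].hashval == want)
		mid--;
	for (; mid < (int)btp->count && blp[mid].hashval == want; mid++)
		if (blp[mid].address != NULL_DATAPTR)
			return mid;  /* first live entry with this hash */
	return -1;                   /* every matching slot was stale */
}

int main(void)
{
	/* A toy "end of block": leaf entries immediately followed by the tail. */
	struct {
		struct leaf_entry ents[4];
		struct block_tail tail;
	} blk = {
		.ents = { { 0x10, 0x100 }, { 0x20, NULL_DATAPTR },
			  { 0x20, 0x200 }, { 0x30, 0x300 } },
		.tail = { .count = 4, .stale = 1 },
	};

	printf("hash 0x20 -> index %d\n", block_lookup(&blk.tail, 0x20));
	return 0;
}

Compiled and run, this prints index 2: the stale slot for hash 0x20 (address == NULL_DATAPTR) is skipped, which mirrors the forward scan visible in the xfs_dir2_block_lookup_int() hits above.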
xfs_dir2_sf.c
68 xfs_dir2_leaf_entry_t *blp; /* leaf area of the block */ xfs_dir2_block_sfsize() local
94 blp = xfs_dir2_block_leaf_p(btp); xfs_dir2_block_sfsize()
100 if ((addr = be32_to_cpu(blp[i].address)) == XFS_DIR2_NULL_DATAPTR) xfs_dir2_block_sfsize()
xfs_dir2_leaf.c
377 xfs_dir2_leaf_entry_t *blp; /* block's leaf entries */ xfs_dir2_block_to_leaf() local
417 blp = xfs_dir2_block_leaf_p(btp); xfs_dir2_block_to_leaf()
434 memcpy(ents, blp, be32_to_cpu(btp->count) * sizeof(xfs_dir2_leaf_entry_t)); xfs_dir2_block_to_leaf()
443 (xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr), xfs_dir2_block_to_leaf()
445 (char *)blp), xfs_dir2_block_to_leaf()
/linux-4.1.27/drivers/staging/lustre/lustre/ldlm/
ldlm_lockd.c
381 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool; __ldlm_bl_to_thread() local
383 spin_lock(&blp->blp_lock); __ldlm_bl_to_thread()
387 list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list); __ldlm_bl_to_thread()
390 list_add_tail(&blwi->blwi_entry, &blp->blp_list); __ldlm_bl_to_thread()
392 spin_unlock(&blp->blp_lock); __ldlm_bl_to_thread()
394 wake_up(&blp->blp_waitq); __ldlm_bl_to_thread()
722 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp) ldlm_bl_get_work() argument
727 spin_lock(&blp->blp_lock); ldlm_bl_get_work()
729 if (!list_empty(&blp->blp_list) && ldlm_bl_get_work()
730 (list_empty(&blp->blp_prio_list) || num_bl == 0)) ldlm_bl_get_work()
731 blwi = list_entry(blp->blp_list.next, ldlm_bl_get_work()
734 if (!list_empty(&blp->blp_prio_list)) ldlm_bl_get_work()
735 blwi = list_entry(blp->blp_prio_list.next, ldlm_bl_get_work()
740 if (++num_bl >= atomic_read(&blp->blp_num_threads)) ldlm_bl_get_work()
744 spin_unlock(&blp->blp_lock); ldlm_bl_get_work()
759 static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp) ldlm_bl_thread_start() argument
761 struct ldlm_bl_thread_data bltd = { .bltd_blp = blp }; ldlm_bl_thread_start()
765 bltd.bltd_num = atomic_read(&blp->blp_num_threads); ldlm_bl_thread_start()
771 atomic_read(&blp->blp_num_threads), PTR_ERR(task)); ldlm_bl_thread_start()
788 struct ldlm_bl_pool *blp; ldlm_bl_thread_main() local
793 blp = bltd->bltd_blp; ldlm_bl_thread_main()
795 atomic_inc(&blp->blp_num_threads); ldlm_bl_thread_main()
796 atomic_inc(&blp->blp_busy_threads); ldlm_bl_thread_main()
807 blwi = ldlm_bl_get_work(blp); ldlm_bl_thread_main()
810 atomic_dec(&blp->blp_busy_threads); ldlm_bl_thread_main()
811 l_wait_event_exclusive(blp->blp_waitq, ldlm_bl_thread_main()
812 (blwi = ldlm_bl_get_work(blp)) != NULL, ldlm_bl_thread_main()
814 busy = atomic_inc_return(&blp->blp_busy_threads); ldlm_bl_thread_main()
816 busy = atomic_read(&blp->blp_busy_threads); ldlm_bl_thread_main()
824 if (unlikely(busy < blp->blp_max_threads && ldlm_bl_thread_main()
825 busy >= atomic_read(&blp->blp_num_threads) && ldlm_bl_thread_main()
828 ldlm_bl_thread_start(blp); ldlm_bl_thread_main()
857 atomic_dec(&blp->blp_busy_threads); ldlm_bl_thread_main()
858 atomic_dec(&blp->blp_num_threads); ldlm_bl_thread_main()
859 complete(&blp->blp_comp); ldlm_bl_thread_main()
1008 struct ldlm_bl_pool *blp = NULL; ldlm_setup() local
1062 OBD_ALLOC(blp, sizeof(*blp)); ldlm_setup()
1063 if (blp == NULL) { ldlm_setup()
1067 ldlm_state->ldlm_bl_pool = blp; ldlm_setup()
1069 spin_lock_init(&blp->blp_lock); ldlm_setup()
1070 INIT_LIST_HEAD(&blp->blp_list); ldlm_setup()
1071 INIT_LIST_HEAD(&blp->blp_prio_list); ldlm_setup()
1072 init_waitqueue_head(&blp->blp_waitq); ldlm_setup()
1073 atomic_set(&blp->blp_num_threads, 0); ldlm_setup()
1074 atomic_set(&blp->blp_busy_threads, 0); ldlm_setup()
1077 blp->blp_min_threads = LDLM_NTHRS_INIT; ldlm_setup()
1078 blp->blp_max_threads = LDLM_NTHRS_MAX; ldlm_setup()
1080 blp->blp_min_threads = blp->blp_max_threads = ldlm_setup()
1085 for (i = 0; i < blp->blp_min_threads; i++) { ldlm_setup()
1086 rc = ldlm_bl_thread_start(blp); ldlm_setup()
1117 struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool; ldlm_cleanup() local
1119 while (atomic_read(&blp->blp_num_threads) > 0) { ldlm_cleanup()
1122 init_completion(&blp->blp_comp); ldlm_cleanup()
1124 spin_lock(&blp->blp_lock); ldlm_cleanup()
1125 list_add_tail(&blwi.blwi_entry, &blp->blp_list); ldlm_cleanup()
1126 wake_up(&blp->blp_waitq); ldlm_cleanup()
1127 spin_unlock(&blp->blp_lock); ldlm_cleanup()
1129 wait_for_completion(&blp->blp_comp); ldlm_cleanup()
1132 OBD_FREE(blp, sizeof(*blp)); ldlm_cleanup()
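All of the Lustre hits are the blocking-callback ("bl") thread pool: work items are queued on blp_list or blp_prio_list under blp_lock, waiters are woken through blp_waitq, and ldlm_bl_get_work() hands priority work out first. Below is a rough userspace analogue of that producer/consumer shape, with a pthread mutex and condition variable standing in for the kernel spinlock and waitqueue; every name is illustrative, and the starvation-avoidance and thread-accounting logic of the real pool is omitted.

#include <pthread.h>
#include <stddef.h>

/* Illustrative stand-in for ldlm_bl_work_item: one queued callback. */
struct work_item {
	struct work_item *next;
	int prio;                     /* non-zero: goes on the priority queue */
};

/* Illustrative stand-in for ldlm_bl_pool: one lock, two queues, a wakeup. */
struct bl_pool {
	pthread_mutex_t lock;         /* plays the role of blp->blp_lock */
	pthread_cond_t waitq;         /* plays the role of blp->blp_waitq */
	struct work_item *list;       /* plays the role of blp->blp_list */
	struct work_item *prio_list;  /* plays the role of blp->blp_prio_list */
};

#define BL_POOL_INIT \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, NULL, NULL }

/* Producer side, like __ldlm_bl_to_thread(): queue and wake one waiter. */
static void pool_add(struct bl_pool *blp, struct work_item *w)
{
	struct work_item **head;

	pthread_mutex_lock(&blp->lock);
	head = w->prio ? &blp->prio_list : &blp->list;
	while (*head)                 /* append at the tail, FIFO order */
		head = &(*head)->next;
	w->next = NULL;
	*head = w;
	pthread_mutex_unlock(&blp->lock);
	pthread_cond_signal(&blp->waitq);
}

/* Consumer side, like ldlm_bl_get_work(): sleep until work is queued, then
 * prefer the priority list.  (The kernel version also services the regular
 * list periodically so priority work cannot starve it.) */
static struct work_item *pool_get(struct bl_pool *blp)
{
	struct work_item *w;

	pthread_mutex_lock(&blp->lock);
	while (!blp->prio_list && !blp->list)
		pthread_cond_wait(&blp->waitq, &blp->lock);
	if (blp->prio_list) {
		w = blp->prio_list;
		blp->prio_list = w->next;
	} else {
		w = blp->list;
		blp->list = w->next;
	}
	pthread_mutex_unlock(&blp->lock);
	return w;
}

A worker thread would simply loop on pool_get() and handle each item, which is the role ldlm_bl_thread_main() plays above, with blp_num_threads/blp_busy_threads deciding when ldlm_bl_thread_start() spawns another worker.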
/linux-4.1.27/drivers/crypto/qat/qat_common/
qat_algs.c
630 dma_addr_t blp = qat_req->buf.blp; qat_alg_free_bufl() local
640 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); qat_alg_free_bufl()
642 if (blp != blpout) { qat_alg_free_bufl()
668 dma_addr_t blp; qat_alg_sgl_to_bufl() local
682 blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); qat_alg_sgl_to_bufl()
683 if (unlikely(dma_mapping_error(dev, blp))) qat_alg_sgl_to_bufl()
723 qat_req->buf.blp = blp;
768 qat_req->buf.bloutp = qat_req->buf.blp;
781 if (!dma_mapping_error(dev, blp))
782 dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
860 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_alg_aead_dec()
904 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_alg_aead_enc_internal()
1032 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_alg_ablkcipher_encrypt()
1070 qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp; qat_alg_ablkcipher_decrypt()
qat_crypto.h
71 dma_addr_t blp; member in struct:qat_crypto_request_buffs
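In the QAT driver blp is not a pointer but a dma_addr_t: qat_alg_sgl_to_bufl() flattens the request's scatterlist into a buffer-list descriptor, maps that descriptor with dma_map_single(), stores the resulting bus address in qat_req->buf.blp, and later programs it into the firmware request as src_data_addr (with bloutp used when the output differs from the input). The userspace sketch below shows only the flattening step, with made-up struct and field names; the DMA mapping itself is kernel-only and is just noted in the comments.

#include <stdint.h>
#include <stdlib.h>

/* Made-up flat descriptor, shaped like the buffer list the driver builds
 * before handing its bus address (blp) to the hardware. */
struct flat_buf {
	uint64_t addr;                /* device-visible address of a segment */
	uint32_t len;                 /* segment length in bytes */
};

struct buf_list {
	uint32_t num_bufs;
	struct flat_buf bufs[];       /* one entry per scatterlist segment */
};

/* Flatten an array of (address, length) segments into one descriptor.
 * The driver walks a struct scatterlist here instead, then calls
 * dma_map_single() on the finished descriptor; that returned handle is
 * what the hits above store as qat_req->buf.blp and unmap on free. */
static struct buf_list *build_buf_list(const uint64_t *addrs,
				       const uint32_t *lens, uint32_t n)
{
	struct buf_list *bl;
	uint32_t i;

	bl = malloc(sizeof(*bl) + (size_t)n * sizeof(struct flat_buf));
	if (!bl)
		return NULL;
	bl->num_bufs = n;
	for (i = 0; i < n; i++) {
		bl->bufs[i].addr = addrs[i];
		bl->bufs[i].len = lens[i];
	}
	return bl;
}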
/linux-4.1.27/drivers/message/fusion/
mptctl.c
129 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
1019 * blp - point to the buflist pointer
1026 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc) kbuf_alloc_2_sgl()
1043 *blp = NULL; kbuf_alloc_2_sgl()
1134 *blp = buflist; kbuf_alloc_2_sgl()
1025 kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags, struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc) kbuf_alloc_2_sgl() argument
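Here kbuf_alloc_2_sgl() carves the requested byte count into DMA-able fragments, builds the hardware SGL, and hands the fragment table back through the struct buflist **blp out-parameter so mptctl can free the buffers afterwards. A minimal userspace sketch of that calling convention, under the assumption of plain malloc-backed fragments and invented names (the real routine uses DMA-coherent allocations and also fills in the SGL):

#include <stdlib.h>

/* Illustrative analogue of the driver's struct buflist: one record per
 * allocated fragment so the caller can release them later. */
struct buflist {
	void  *kptr;
	size_t len;
};

/* Roughly the shape of kbuf_alloc_2_sgl(): split `bytes` into fixed-size
 * fragments, return the fragment table through *blp and the fragment
 * count through *frags.  Returns 0 on success, -1 on failure. */
static int alloc_frags(size_t bytes, size_t frag_sz,
		       int *frags, struct buflist **blp)
{
	int n = (int)((bytes + frag_sz - 1) / frag_sz);
	struct buflist *bl = calloc((size_t)n, sizeof(*bl));
	int i;

	*blp = NULL;                  /* mirrors the "*blp = NULL" hit above */
	if (!bl)
		return -1;
	for (i = 0; i < n; i++) {
		size_t len = bytes < frag_sz ? bytes : frag_sz;

		bl[i].kptr = malloc(len);
		if (!bl[i].kptr)
			goto unwind;
		bl[i].len = len;
		bytes -= len;
	}
	*frags = n;
	*blp = bl;                    /* caller now owns the table */
	return 0;

unwind:
	while (i--)
		free(bl[i].kptr);
	free(bl);
	return -1;
}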
/linux-4.1.27/drivers/staging/media/lirc/
lirc_serial.c
9 * Copyright (C) 1998 Ben Pfaff <blp@gnu.org>
