bd2              1720 arch/x86/platform/uv/tlb_uv.c 	struct bau_desc *bd2;
bd2              1747 arch/x86/platform/uv/tlb_uv.c 	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
bd2              1748 arch/x86/platform/uv/tlb_uv.c 		memset(bd2, 0, sizeof(struct bau_desc));
bd2              1750 arch/x86/platform/uv/tlb_uv.c 			uv1_hdr = &bd2->header.uv1_hdr;
bd2              1773 arch/x86/platform/uv/tlb_uv.c 			uv2_3_hdr = &bd2->header.uv2_3_hdr;
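The five arch/x86/platform/uv/tlb_uv.c hits above all sit in the BAU activation-descriptor setup loop (activation_descriptor_init() in that file, going by the line numbers): bd2 is the cursor that walks the per-hub array of struct bau_desc entries, clears each one, and then selects either the UV1 or the shared UV2/UV3 header layout to fill in. A minimal sketch of that pattern (the hub-revision test and the individual header-field assignments are simplified placeholders here, not the real code) is:

	/*
	 * Sketch of the descriptor setup loop behind the tlb_uv.c hits above.
	 * "is_uv1" stands in for the real hub-revision check, and only the
	 * header selection that involves bd2 is shown.
	 */
	static void setup_bau_descriptors(struct bau_desc *bau_desc, bool is_uv1)
	{
		struct bau_desc *bd2;
		struct uv1_bau_msg_header *uv1_hdr;
		struct uv2_3_bau_msg_header *uv2_3_hdr;
		int i;

		/* bd2 walks every descriptor in the per-hub array ... */
		for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
			/* ... zeroing it first ... */
			memset(bd2, 0, sizeof(struct bau_desc));
			if (is_uv1) {
				/* ... then filling the UV1 header layout ... */
				uv1_hdr = &bd2->header.uv1_hdr;
				/* UV1-specific header fields set here */
			} else {
				/* ... or the shared UV2/UV3 header layout. */
				uv2_3_hdr = &bd2->header.uv2_3_hdr;
				/* UV2/UV3-specific header fields set here */
			}
		}
	}
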
bd2               643 fs/gfs2/lops.c 	struct gfs2_bufdata *bd1 = NULL, *bd2;
bd2               651 fs/gfs2/lops.c 	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
bd2               680 fs/gfs2/lops.c 		list_for_each_entry_continue(bd2, blist, bd_list) {
bd2               681 fs/gfs2/lops.c 			get_bh(bd2->bd_bh);
bd2               683 fs/gfs2/lops.c 			lock_buffer(bd2->bd_bh);
bd2               685 fs/gfs2/lops.c 			if (buffer_escaped(bd2->bd_bh)) {
bd2               689 fs/gfs2/lops.c 				kaddr = kmap_atomic(bd2->bd_bh->b_page);
bd2               690 fs/gfs2/lops.c 				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
bd2               691 fs/gfs2/lops.c 				       bd2->bd_bh->b_size);
bd2               694 fs/gfs2/lops.c 				clear_buffer_escaped(bd2->bd_bh);
bd2               695 fs/gfs2/lops.c 				unlock_buffer(bd2->bd_bh);
bd2               696 fs/gfs2/lops.c 				brelse(bd2->bd_bh);
bd2               699 fs/gfs2/lops.c 				gfs2_log_write_bh(sdp, bd2->bd_bh);
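The fs/gfs2/lops.c hits belong to the log write-out path (gfs2_before_commit(), going by the code): bd1 and bd2 both start from the same list_prepare_entry() position, bd1 drives a first pass that fills the log descriptor with block numbers, and bd2 then re-walks the same bd_list to submit each buffer, routing "escaped" buffers (data that would otherwise look like GFS2 log metadata at replay time) through a private copy first. A condensed sketch of the bd2 pass follows; the gfs2_log_lock()/unlock() calls, the while(total) chunking, and the scratch-page allocation are elided, and the few lines the index does not list (the kunmap, the magic reset, the page write) are reconstructed from context and may differ slightly in this exact version:

	/*
	 * Condensed sketch of the second (bd2) pass in gfs2_before_commit().
	 * n, num, ptr, page, kaddr, sdp and blist all come from the
	 * surrounding function and are not declared here.
	 */
	n = 0;
	list_for_each_entry_continue(bd2, blist, bd_list) {
		get_bh(bd2->bd_bh);		/* hold the buffer across the I/O */
		lock_buffer(bd2->bd_bh);
		if (buffer_escaped(bd2->bd_bh)) {
			/* copy the escaped data into a scratch page ... */
			kaddr = kmap_atomic(bd2->bd_bh->b_page);
			memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
			       bd2->bd_bh->b_size);
			kunmap_atomic(kaddr);
			/* ... blank the magic so journal replay ignores it ... */
			*(__be32 *)ptr = 0;
			clear_buffer_escaped(bd2->bd_bh);
			unlock_buffer(bd2->bd_bh);
			brelse(bd2->bd_bh);
			/* ... and write the copy, not the live buffer */
			gfs2_log_write_page(sdp, page);
		} else {
			/* ordinary buffer: write it to the log as-is */
			gfs2_log_write_bh(sdp, bd2->bd_bh);
		}
		if (++n >= num)
			break;
	}
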