eb 576 arch/ia64/include/asm/pal.h eb : 1, /* External bus error */ eb 744 arch/ia64/include/asm/pal.h #define pmci_bus_external_error pme_bus.eb eb 424 arch/ia64/kernel/mca_drv.c if (pbci->eb) eb 584 arch/ia64/kernel/mca_drv.c if (psp->bc && pbci->eb && pbci->bsi == 0) { eb 702 arch/ia64/kernel/mca_drv.c if (pbci->eb && pbci->bsi > 0) eb 444 arch/powerpc/kernel/btext.c unsigned int *eb = (int *)expand_bits_16; eb 449 arch/powerpc/kernel/btext.c base[0] = (eb[bits >> 6] & fg) ^ bg; eb 450 arch/powerpc/kernel/btext.c base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg; eb 451 arch/powerpc/kernel/btext.c base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg; eb 452 arch/powerpc/kernel/btext.c base[3] = (eb[bits & 3] & fg) ^ bg; eb 462 arch/powerpc/kernel/btext.c unsigned int *eb = (int *)expand_bits_8; eb 467 arch/powerpc/kernel/btext.c base[0] = (eb[bits >> 4] & fg) ^ bg; eb 468 arch/powerpc/kernel/btext.c base[1] = (eb[bits & 0xf] & fg) ^ bg; eb 268 arch/sparc/kernel/btext.c unsigned int *eb = (int *)expand_bits_16; eb 273 arch/sparc/kernel/btext.c base[0] = (eb[bits >> 6] & fg) ^ bg; eb 274 arch/sparc/kernel/btext.c base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg; eb 275 arch/sparc/kernel/btext.c base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg; eb 276 arch/sparc/kernel/btext.c base[3] = (eb[bits & 3] & fg) ^ bg; eb 286 arch/sparc/kernel/btext.c unsigned int *eb = (int *)expand_bits_8; eb 291 arch/sparc/kernel/btext.c base[0] = (eb[bits >> 4] & fg) ^ bg; eb 292 arch/sparc/kernel/btext.c base[1] = (eb[bits & 0xf] & fg) ^ bg; eb 80 arch/unicore32/include/asm/assembler.h .else; .ifc \cond, eb eb 753 arch/x86/kvm/vmx/vmx.c u32 eb; eb 755 arch/x86/kvm/vmx/vmx.c eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | eb 764 arch/x86/kvm/vmx/vmx.c eb |= (1u << GP_VECTOR); eb 768 arch/x86/kvm/vmx/vmx.c eb |= 1u << BP_VECTOR; eb 770 arch/x86/kvm/vmx/vmx.c eb = ~0; eb 772 arch/x86/kvm/vmx/vmx.c eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ eb 780 arch/x86/kvm/vmx/vmx.c eb |= get_vmcs12(vcpu)->exception_bitmap; eb 782 arch/x86/kvm/vmx/vmx.c vmcs_write32(EXCEPTION_BITMAP, eb); eb 49 arch/x86/mm/numa_emulation.c struct numa_memblk *eb = &ei->blk[ei->nr_blks]; eb 58 arch/x86/mm/numa_emulation.c eb->start = pb->start; eb 59 arch/x86/mm/numa_emulation.c eb->end = pb->start + size; eb 60 arch/x86/mm/numa_emulation.c eb->nid = nid; eb 72 arch/x86/mm/numa_emulation.c nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20); eb 297 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) eb 299 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return intel_engine_requires_cmd_parser(eb->engine) || eb 300 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c (intel_engine_using_cmd_parser(eb->engine) && eb 301 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->args->batch_len); eb 304 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_create(struct i915_execbuffer *eb) eb 306 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { eb 307 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c unsigned int size = 1 + ilog2(eb->buffer_count); eb 333 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->buckets = kzalloc(sizeof(struct hlist_head) << size, eb 335 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb->buckets) eb 342 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->lut_size = size; eb 344 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->lut_size = -eb->buffer_count; eb 381 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
eb_pin_vma(struct i915_execbuffer *eb, eb 435 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_validate_vma(struct i915_execbuffer *eb, eb 439 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (unlikely(entry->flags & eb->invalid_flags)) eb 464 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c entry->handle, (int)(entry - eb->exec)); eb 475 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (!eb->reloc_cache.has_fence) { eb 479 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->reloc_cache.needs_unfenced) && eb 485 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c entry->flags |= eb->context_flags; eb 491 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_add_vma(struct i915_execbuffer *eb, eb 495 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; eb 500 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (!(eb->args->flags & __EXEC_VALIDATED)) { eb 501 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_validate_vma(eb, entry, vma); eb 506 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb->lut_size > 0) { eb 509 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c &eb->buckets[hash_32(entry->handle, eb 510 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->lut_size)]); eb 514 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c list_add_tail(&vma->reloc_link, &eb->relocs); eb 522 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->vma[i] = vma; eb 523 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->flags[i] = entry->flags; eb 524 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vma->exec_flags = &eb->flags[i]; eb 537 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c !(eb->flags[i] & EXEC_OBJECT_PINNED)) eb 538 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; eb 539 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb->reloc_cache.has_fence) eb 540 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; eb 542 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch = vma; eb 546 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb_pin_vma(eb, entry, vma)) { eb 549 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->args->flags |= __EXEC_HAS_RELOC; eb 554 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c list_add_tail(&vma->exec_link, &eb->unbound); eb 580 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_reserve_vma(const struct i915_execbuffer *eb, eb 583 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); eb 617 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->args->flags |= __EXEC_HAS_RELOC; eb 637 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_reserve(struct i915_execbuffer *eb) eb 639 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const unsigned int count = eb->buffer_count; eb 662 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c list_for_each_entry(vma, &eb->unbound, exec_link) { eb 663 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_reserve_vma(eb, vma); eb 671 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c INIT_LIST_HEAD(&eb->unbound); eb 674 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c unsigned int flags = eb->flags[i]; eb 675 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct i915_vma *vma = eb->vma[i]; eb 681 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_unreserve_vma(vma, &eb->flags[i]); eb 685 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c list_add(&vma->exec_link, &eb->unbound); eb 688 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c list_add_tail(&vma->exec_link, 
&eb->unbound); eb 695 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c list_splice_tail(&last, &eb->unbound); eb 703 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = i915_gem_evict_vm(eb->context->vm); eb 714 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static unsigned int eb_batch_index(const struct i915_execbuffer *eb) eb 716 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb->args->flags & I915_EXEC_BATCH_FIRST) eb 719 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return eb->buffer_count - 1; eb 722 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_select_context(struct i915_execbuffer *eb) eb 726 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1); eb 730 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->gem_context = ctx; eb 732 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; eb 734 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->context_flags = 0; eb 736 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS; eb 741 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_lookup_vmas(struct i915_execbuffer *eb) eb 743 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma; eb 748 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (unlikely(i915_gem_context_is_banned(eb->gem_context))) eb 751 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c INIT_LIST_HEAD(&eb->relocs); eb 752 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c INIT_LIST_HEAD(&eb->unbound); eb 754 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c batch = eb_batch_index(eb); eb 756 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c mutex_lock(&eb->gem_context->mutex); eb 757 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (unlikely(i915_gem_context_is_closed(eb->gem_context))) { eb 762 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c for (i = 0; i < eb->buffer_count; i++) { eb 763 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c u32 handle = eb->exec[i].handle; eb 771 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c obj = i915_gem_object_lookup(eb->file, handle); eb 777 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vma = i915_vma_instance(obj, eb->context->vm, NULL); eb 799 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c lut->ctx = eb->gem_context; eb 806 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_add_vma(eb, i, batch, vma); eb 810 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c GEM_BUG_ON(vma != eb->vma[i]); eb 811 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); eb 813 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i])); eb 816 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c mutex_unlock(&eb->gem_context->mutex); eb 818 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->args->flags |= __EXEC_VALIDATED; eb 819 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return eb_reserve(eb); eb 824 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->vma[i] = NULL; eb 826 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c mutex_unlock(&eb->gem_context->mutex); eb 831 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle) eb 833 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb->lut_size < 0) { eb 834 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (handle >= -eb->lut_size) eb 836 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return eb->vma[handle]; eb 841 
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c head = &eb->buckets[hash_32(handle, eb->lut_size)]; eb 850 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static void eb_release_vmas(const struct i915_execbuffer *eb) eb 852 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const unsigned int count = eb->buffer_count; eb 856 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct i915_vma *vma = eb->vma[i]; eb 857 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c unsigned int flags = eb->flags[i]; eb 862 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); eb 864 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->vma[i] = NULL; eb 874 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static void eb_reset_vmas(const struct i915_execbuffer *eb) eb 876 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_release_vmas(eb); eb 877 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb->lut_size > 0) eb 878 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c memset(eb->buckets, 0, eb 879 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c sizeof(struct hlist_head) << eb->lut_size); eb 882 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static void eb_destroy(const struct i915_execbuffer *eb) eb 884 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c GEM_BUG_ON(eb->reloc_cache.rq); eb 886 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb->lut_size > 0) eb 887 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c kfree(eb->buckets); eb 1142 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int __reloc_gpu_alloc(struct i915_execbuffer *eb, eb 1146 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct reloc_cache *cache = &eb->reloc_cache; eb 1153 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE); eb 1176 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c rq = i915_request_create(eb->context); eb 1190 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb->engine->emit_bb_start(rq, eb 1227 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static u32 *reloc_gpu(struct i915_execbuffer *eb, eb 1231 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct reloc_cache *cache = &eb->reloc_cache; eb 1241 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb_use_cmdparser(eb)) eb 1244 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (!intel_engine_can_store_dword(eb->engine)) eb 1247 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = __reloc_gpu_alloc(eb, vma, len); eb 1261 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct i915_execbuffer *eb, eb 1266 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c bool wide = eb->reloc_cache.use_64bit_reloc; eb 1269 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (!eb->reloc_cache.vaddr && eb 1272 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const unsigned int gen = eb->reloc_cache.gen; eb 1284 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c batch = reloc_gpu(eb, vma, len); eb 1329 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT); eb 1335 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->reloc_cache.vaddr); eb 1349 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_relocate_entry(struct i915_execbuffer *eb, eb 1357 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c target = eb_get_vma(eb, reloc->target_handle); eb 1394 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c IS_GEN(eb->i915, 6)) { eb 1413 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vma->size - (eb->reloc_cache.use_64bit_reloc ? 
8 : 4))) { eb 1440 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return relocate_entry(vma, reloc, eb, target); eb 1443 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma) eb 1448 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); eb 1488 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c u64 offset = eb_relocate_entry(eb, vma, r); eb 1526 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c reloc_cache_reset(&eb->reloc_cache); eb 1531 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma) eb 1533 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma); eb 1540 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c u64 offset = eb_relocate_entry(eb, vma, &relocs[i]); eb 1549 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c reloc_cache_reset(&eb->reloc_cache); eb 1580 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_copy_relocations(const struct i915_execbuffer *eb) eb 1583 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const unsigned int count = eb->buffer_count; eb 1588 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const unsigned int nreloc = eb->exec[i].relocation_count; eb 1596 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = check_relocations(&eb->exec[i]); eb 1600 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr); eb 1642 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->exec[i].relocs_ptr = (uintptr_t)relocs; eb 1654 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr); eb 1655 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb->exec[i].relocation_count) eb 1661 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_prefault_relocations(const struct i915_execbuffer *eb) eb 1663 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const unsigned int count = eb->buffer_count; eb 1672 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = check_relocations(&eb->exec[i]); eb 1680 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static noinline int eb_relocate_slow(struct i915_execbuffer *eb) eb 1682 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct drm_device *dev = &eb->i915->drm; eb 1694 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_reset_vmas(eb); eb 1711 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_prefault_relocations(eb); eb 1713 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_copy_relocations(eb); eb 1725 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c flush_workqueue(eb->i915->mm.userptr_wq); eb 1734 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_lookup_vmas(eb); eb 1738 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c GEM_BUG_ON(!eb->batch); eb 1740 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c list_for_each_entry(vma, &eb->relocs, reloc_link) { eb 1743 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_relocate_vma(eb, vma); eb 1748 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_relocate_vma_slow(eb, vma); eb 1767 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const unsigned int count = eb->buffer_count; eb 1772 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c &eb->exec[i]; eb 1786 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_relocate(struct i915_execbuffer *eb) eb 1788 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb_lookup_vmas(eb)) eb 1792 
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb->args->flags & __EXEC_HAS_RELOC) { eb 1795 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c list_for_each_entry(vma, &eb->relocs, reloc_link) { eb 1796 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb_relocate_vma(eb, vma)) eb 1804 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return eb_relocate_slow(eb); eb 1807 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_move_to_gpu(struct i915_execbuffer *eb) eb 1809 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const unsigned int count = eb->buffer_count; eb 1817 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct i915_vma *vma = eb->vma[i]; eb 1830 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c ww_mutex_unlock(&eb->vma[j]->resv->lock); eb 1832 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c swap(eb->flags[i], eb->flags[j]); eb 1833 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c swap(eb->vma[i], eb->vma[j]); eb 1834 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->vma[i]->exec_flags = &eb->flags[i]; eb 1836 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c GEM_BUG_ON(vma != eb->vma[0]); eb 1837 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vma->exec_flags = &eb->flags[0]; eb 1848 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c unsigned int flags = eb->flags[i]; eb 1849 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct i915_vma *vma = eb->vma[i]; eb 1859 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c capture->next = eb->request->capture_list; eb 1861 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->request->capture_list = capture; eb 1884 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c (eb->request, obj, flags & EXEC_OBJECT_WRITE); eb 1888 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = i915_vma_move_to_active(vma, eb->request, flags); eb 1903 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->exec = NULL; eb 1906 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c intel_gt_chipset_flush(eb->engine->gt); eb 1910 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c i915_request_skip(eb->request, err); eb 1964 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c shadow_batch_pin(struct i915_execbuffer *eb, struct drm_i915_gem_object *obj) eb 1966 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct drm_i915_private *dev_priv = eb->i915; eb 1967 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct i915_vma * const vma = *eb->vma; eb 1990 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static struct i915_vma *eb_parse(struct i915_execbuffer *eb) eb 1998 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len); eb 2002 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vma = shadow_batch_pin(eb, pool->obj); eb 2006 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c batch_start = gen8_canonical_addr(eb->batch->node.start) + eb 2007 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch_start_offset; eb 2011 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = intel_engine_cmd_parser(eb->gem_context, eb 2012 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->engine, eb 2013 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch->obj, eb 2015 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch_start_offset, eb 2016 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch_len, eb 2029 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (CMDPARSER_USES_GGTT(eb->i915) && (err == -EACCES)) eb 2037 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->vma[eb->buffer_count] = i915_vma_get(vma); eb 2038 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
eb->flags[eb->buffer_count] = eb 2040 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vma->exec_flags = &eb->flags[eb->buffer_count]; eb 2041 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->buffer_count++; eb 2043 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch_start_offset = 0; eb 2044 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch = vma; eb 2046 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (CMDPARSER_USES_GGTT(eb->i915)) eb 2047 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch_flags |= I915_DISPATCH_SECURE; eb 2071 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int eb_submit(struct i915_execbuffer *eb) eb 2075 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_move_to_gpu(eb); eb 2079 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) { eb 2080 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = i915_reset_gen7_sol_offsets(eb->request); eb 2091 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb->engine->emit_init_breadcrumb) { eb 2092 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb->engine->emit_init_breadcrumb(eb->request); eb 2097 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb->engine->emit_bb_start(eb->request, eb 2098 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch->node.start + eb 2099 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch_start_offset, eb 2100 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch_len, eb 2101 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->batch_flags); eb 2175 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c __eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce) eb 2182 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex); eb 2187 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c mutex_unlock(&eb->i915->drm.struct_mutex); eb 2193 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c __eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce) eb 2198 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c mutex_lock(&eb->i915->drm.struct_mutex); eb 2200 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c mutex_unlock(&eb->i915->drm.struct_mutex); eb 2203 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce) eb 2222 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = __eb_pin_context(eb, ce); eb 2257 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->engine = ce->engine; eb 2258 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb->context = ce; eb 2266 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c __eb_unpin_context(eb, ce); eb 2270 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static void eb_unpin_engine(struct i915_execbuffer *eb) eb 2272 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct intel_context *ce = eb->context; eb 2279 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c __eb_unpin_context(eb, ce); eb 2283 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_select_legacy_ring(struct i915_execbuffer *eb, eb 2287 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct drm_i915_private *i915 = eb->i915; eb 2324 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_pin_engine(struct i915_execbuffer *eb, eb 2332 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (i915_gem_context_user_engines(eb->gem_context)) eb 2335 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c idx = eb_select_legacy_ring(eb, file, args); eb 2337 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c ce = 
i915_gem_context_get_engine(eb->gem_context, idx); eb 2341 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = __eb_pin_engine(eb, ce); eb 2427 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c await_fence_array(struct i915_execbuffer *eb, eb 2430 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const unsigned int nfences = eb->args->num_cliprects; eb 2447 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = i915_request_await_dma_fence(eb->request, fence); eb 2457 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c signal_fence_array(struct i915_execbuffer *eb, eb 2460 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c const unsigned int nfences = eb->args->num_cliprects; eb 2461 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct dma_fence * const fence = &eb->request->fence; eb 2484 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct i915_execbuffer eb; eb 2495 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.i915 = i915; eb 2496 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.file = file; eb 2497 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.args = args; eb 2501 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.exec = exec; eb 2502 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1); eb 2503 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.vma[0] = NULL; eb 2504 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1); eb 2506 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS; eb 2507 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c reloc_cache_init(&eb.reloc_cache, eb.i915); eb 2509 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.buffer_count = args->buffer_count; eb 2510 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.batch_start_offset = args->batch_start_offset; eb 2511 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.batch_len = args->batch_len; eb 2513 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.batch_flags = 0; eb 2525 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.batch_flags |= I915_DISPATCH_SECURE; eb 2528 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.batch_flags |= I915_DISPATCH_PINNED; eb 2557 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_create(&eb); eb 2561 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c GEM_BUG_ON(!eb.lut_size); eb 2563 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_select_context(&eb); eb 2567 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_pin_engine(&eb, file, args); eb 2575 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_relocate(&eb); eb 2588 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) { eb 2593 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb.batch_start_offset > eb.batch->size || eb 2594 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.batch_len > eb.batch->size - eb.batch_start_offset) { eb 2600 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb.batch_len == 0) eb 2601 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.batch_len = eb.batch->size - eb.batch_start_offset; eb 2603 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb_use_cmdparser(&eb)) { eb 2606 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vma = eb_parse(&eb); eb 2617 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb.batch_flags & I915_DISPATCH_SECURE) { eb 2630 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0); eb 2636 
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.batch = vma; eb 2640 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c GEM_BUG_ON(eb.reloc_cache.rq); eb 2643 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.request = i915_request_create(eb.context); eb 2644 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (IS_ERR(eb.request)) { eb 2645 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = PTR_ERR(eb.request); eb 2650 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = i915_request_await_dma_fence(eb.request, in_fence); eb 2656 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = i915_request_await_execution(eb.request, exec_fence, eb 2657 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.engine->bond_execute); eb 2663 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = await_fence_array(&eb, fences); eb 2669 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c out_fence = sync_file_create(&eb.request->fence); eb 2683 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb.request->batch = eb.batch; eb 2684 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb.batch->private) eb 2685 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c intel_engine_pool_mark_active(eb.batch->private, eb.request); eb 2687 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c trace_i915_request_queue(eb.request, eb.batch_flags); eb 2688 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c err = eb_submit(&eb); eb 2690 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c add_to_client(eb.request, file); eb 2691 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c i915_request_add(eb.request); eb 2694 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c signal_fence_array(&eb, fences); eb 2708 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb.batch_flags & I915_DISPATCH_SECURE) eb 2709 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c i915_vma_unpin(eb.batch); eb 2710 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb.batch->private) eb 2711 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c intel_engine_pool_put(eb.batch->private); eb 2713 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (eb.exec) eb 2714 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_release_vmas(&eb); eb 2717 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_unpin_engine(&eb); eb 2719 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c i915_gem_context_put(eb.gem_context); eb 2721 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c eb_destroy(&eb); eb 113 drivers/input/mouse/synaptics.h #define SYN_EXT_BUTTON_STICK_L(eb) (((eb) & BIT(0)) >> 0) eb 114 drivers/input/mouse/synaptics.h #define SYN_EXT_BUTTON_STICK_M(eb) (((eb) & BIT(1)) >> 1) eb 115 drivers/input/mouse/synaptics.h #define SYN_EXT_BUTTON_STICK_R(eb) (((eb) & BIT(2)) >> 2) eb 819 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) eb 821 drivers/media/common/videobuf2/videobuf2-v4l2.c return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index, eb 822 drivers/media/common/videobuf2/videobuf2-v4l2.c eb->plane, eb->flags); eb 384 drivers/media/platform/exynos-gsc/gsc-m2m.c struct v4l2_exportbuffer *eb) eb 387 drivers/media/platform/exynos-gsc/gsc-m2m.c return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb); eb 646 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c struct v4l2_exportbuffer *eb) eb 650 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) eb 651 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c return vb2_expbuf(&ctx->vq_src, eb); eb 652 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) eb 653 
drivers/media/platform/s5p-mfc/s5p_mfc_dec.c return vb2_expbuf(&ctx->vq_dst, eb); eb 1637 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c struct v4l2_exportbuffer *eb) eb 1641 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c if (eb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) eb 1642 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c return vb2_expbuf(&ctx->vq_src, eb); eb 1643 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c if (eb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) eb 1644 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c return vb2_expbuf(&ctx->vq_dst, eb); eb 542 drivers/media/v4l2-core/v4l2-mem2mem.c struct v4l2_exportbuffer *eb) eb 546 drivers/media/v4l2-core/v4l2-mem2mem.c vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); eb 547 drivers/media/v4l2-core/v4l2-mem2mem.c return vb2_expbuf(vq, eb); eb 1102 drivers/media/v4l2-core/v4l2-mem2mem.c struct v4l2_exportbuffer *eb) eb 1106 drivers/media/v4l2-core/v4l2-mem2mem.c return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); eb 184 drivers/mtd/mtdswap.c static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb) eb 186 drivers/mtd/mtdswap.c return (loff_t)(eb - d->eb_data) * d->mtd->erasesize; eb 189 drivers/mtd/mtdswap.c static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb) eb 194 drivers/mtd/mtdswap.c if (eb->root) { eb 195 drivers/mtd/mtdswap.c tp = container_of(eb->root, struct mtdswap_tree, root); eb 199 drivers/mtd/mtdswap.c rb_erase(&eb->rb, eb->root); eb 203 drivers/mtd/mtdswap.c static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb) eb 212 drivers/mtd/mtdswap.c if (eb->erase_count > cur->erase_count) eb 218 drivers/mtd/mtdswap.c rb_link_node(&eb->rb, parent, p); eb 219 drivers/mtd/mtdswap.c rb_insert_color(&eb->rb, root); eb 222 drivers/mtd/mtdswap.c static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx) eb 226 drivers/mtd/mtdswap.c if (eb->root == &d->trees[idx].root) eb 229 drivers/mtd/mtdswap.c mtdswap_eb_detach(d, eb); eb 231 drivers/mtd/mtdswap.c __mtdswap_rb_add(root, eb); eb 232 drivers/mtd/mtdswap.c eb->root = root; eb 251 drivers/mtd/mtdswap.c static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb) eb 257 drivers/mtd/mtdswap.c eb->flags |= EBLOCK_BAD; eb 258 drivers/mtd/mtdswap.c mtdswap_eb_detach(d, eb); eb 259 drivers/mtd/mtdswap.c eb->root = NULL; eb 265 drivers/mtd/mtdswap.c offset = mtdswap_eb_offset(d, eb); eb 279 drivers/mtd/mtdswap.c static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb) eb 281 drivers/mtd/mtdswap.c unsigned int marked = eb->flags & EBLOCK_FAILED; eb 284 drivers/mtd/mtdswap.c eb->flags |= EBLOCK_FAILED; eb 285 drivers/mtd/mtdswap.c if (curr_write == eb) { eb 289 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, MTDSWAP_FAILING); eb 294 drivers/mtd/mtdswap.c return mtdswap_handle_badblock(d, eb); eb 321 drivers/mtd/mtdswap.c static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb) eb 328 drivers/mtd/mtdswap.c offset = mtdswap_eb_offset(d, eb); eb 350 drivers/mtd/mtdswap.c eb->erase_count = le32_to_cpu(data->count); eb 360 drivers/mtd/mtdswap.c eb->flags |= EBLOCK_NOMAGIC; eb 367 drivers/mtd/mtdswap.c static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb, eb 382 drivers/mtd/mtdswap.c n.count = cpu_to_le32(eb->erase_count); eb 384 drivers/mtd/mtdswap.c offset = mtdswap_eb_offset(d, eb); eb 388 drivers/mtd/mtdswap.c offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize; eb 397 drivers/mtd/mtdswap.c mtdswap_handle_write_error(d, eb); eb 420 drivers/mtd/mtdswap.c struct swap_eb 
*eb; eb 425 drivers/mtd/mtdswap.c eb = d->eb_data + i; eb 427 drivers/mtd/mtdswap.c if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR)) eb 430 drivers/mtd/mtdswap.c __mtdswap_rb_add(&hist_root, eb); eb 443 drivers/mtd/mtdswap.c eb = d->eb_data + i; eb 445 drivers/mtd/mtdswap.c if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR)) eb 446 drivers/mtd/mtdswap.c eb->erase_count = median; eb 448 drivers/mtd/mtdswap.c if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR)) eb 451 drivers/mtd/mtdswap.c rb_erase(&eb->rb, &hist_root); eb 459 drivers/mtd/mtdswap.c struct swap_eb *eb; eb 462 drivers/mtd/mtdswap.c eb = d->eb_data + i; eb 464 drivers/mtd/mtdswap.c status = mtdswap_read_markers(d, eb); eb 466 drivers/mtd/mtdswap.c eb->flags |= EBLOCK_READERR; eb 468 drivers/mtd/mtdswap.c eb->flags |= EBLOCK_BAD; eb 484 drivers/mtd/mtdswap.c eb->flags |= (idx << EBLOCK_IDX_SHIFT); eb 490 drivers/mtd/mtdswap.c eb = d->eb_data + i; eb 492 drivers/mtd/mtdswap.c if (eb->flags & EBLOCK_BAD) eb 495 drivers/mtd/mtdswap.c idx = eb->flags >> EBLOCK_IDX_SHIFT; eb 496 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, idx); eb 504 drivers/mtd/mtdswap.c static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb) eb 506 drivers/mtd/mtdswap.c unsigned int weight = eb->active_count; eb 509 drivers/mtd/mtdswap.c if (eb == d->curr_write) eb 512 drivers/mtd/mtdswap.c if (eb->flags & EBLOCK_BITFLIP) eb 513 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); eb 514 drivers/mtd/mtdswap.c else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED)) eb 515 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, MTDSWAP_FAILING); eb 517 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, MTDSWAP_USED); eb 519 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); eb 521 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG); eb 523 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG); eb 526 drivers/mtd/mtdswap.c static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb) eb 533 drivers/mtd/mtdswap.c eb->erase_count++; eb 534 drivers/mtd/mtdswap.c if (eb->erase_count > d->max_erase_count) eb 535 drivers/mtd/mtdswap.c d->max_erase_count = eb->erase_count; eb 539 drivers/mtd/mtdswap.c erase.addr = mtdswap_eb_offset(d, eb); eb 555 drivers/mtd/mtdswap.c mtdswap_handle_badblock(d, eb); eb 568 drivers/mtd/mtdswap.c struct swap_eb *eb; eb 576 drivers/mtd/mtdswap.c eb = rb_entry(rb_first(clean_root), struct swap_eb, rb); eb 577 drivers/mtd/mtdswap.c rb_erase(&eb->rb, clean_root); eb 578 drivers/mtd/mtdswap.c eb->root = NULL; eb 581 drivers/mtd/mtdswap.c ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY); eb 588 drivers/mtd/mtdswap.c d->curr_write = eb; eb 618 drivers/mtd/mtdswap.c struct swap_eb *eb; eb 630 drivers/mtd/mtdswap.c eb = d->eb_data + (*bp / d->pages_per_eblk); eb 634 drivers/mtd/mtdswap.c eb->active_count--; eb 646 drivers/mtd/mtdswap.c eb->active_count--; eb 648 drivers/mtd/mtdswap.c mtdswap_handle_write_error(d, eb); eb 669 drivers/mtd/mtdswap.c eb->active_count--; eb 679 drivers/mtd/mtdswap.c struct swap_eb *eb, *oldeb; eb 719 drivers/mtd/mtdswap.c eb = d->eb_data + *newblock / d->pages_per_eblk; eb 722 drivers/mtd/mtdswap.c eb = d->eb_data + oldblock / d->pages_per_eblk; eb 723 drivers/mtd/mtdswap.c eb->active_count--; eb 733 drivers/mtd/mtdswap.c static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb) eb 739 drivers/mtd/mtdswap.c eblk_base = (eb - d->eb_data) * d->pages_per_eblk; eb 849 drivers/mtd/mtdswap.c struct swap_eb *eb = NULL; eb 861 
drivers/mtd/mtdswap.c eb = rb_entry(rb_first(rp), struct swap_eb, rb); eb 863 drivers/mtd/mtdswap.c rb_erase(&eb->rb, rp); eb 864 drivers/mtd/mtdswap.c eb->root = NULL; eb 866 drivers/mtd/mtdswap.c return eb; eb 875 drivers/mtd/mtdswap.c struct swap_eb *eb) eb 891 drivers/mtd/mtdswap.c base = mtdswap_eb_offset(d, eb); eb 925 drivers/mtd/mtdswap.c ret = mtdswap_erase_block(d, eb); eb 930 drivers/mtd/mtdswap.c eb->flags &= ~EBLOCK_READERR; eb 934 drivers/mtd/mtdswap.c mtdswap_handle_badblock(d, eb); eb 940 drivers/mtd/mtdswap.c struct swap_eb *eb; eb 946 drivers/mtd/mtdswap.c eb = mtdswap_pick_gc_eblk(d, background); eb 947 drivers/mtd/mtdswap.c if (!eb) eb 950 drivers/mtd/mtdswap.c ret = mtdswap_gc_eblock(d, eb); eb 954 drivers/mtd/mtdswap.c if (eb->flags & EBLOCK_FAILED) { eb 955 drivers/mtd/mtdswap.c mtdswap_handle_badblock(d, eb); eb 959 drivers/mtd/mtdswap.c eb->flags &= ~EBLOCK_BITFLIP; eb 960 drivers/mtd/mtdswap.c ret = mtdswap_erase_block(d, eb); eb 961 drivers/mtd/mtdswap.c if ((eb->flags & EBLOCK_READERR) && eb 962 drivers/mtd/mtdswap.c (ret || !mtdswap_eblk_passes(d, eb))) eb 966 drivers/mtd/mtdswap.c ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN); eb 969 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, MTDSWAP_CLEAN); eb 971 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); eb 1025 drivers/mtd/mtdswap.c struct swap_eb *eb; eb 1043 drivers/mtd/mtdswap.c eb = d->eb_data + (mapped / d->pages_per_eblk); eb 1044 drivers/mtd/mtdswap.c eb->active_count--; eb 1045 drivers/mtd/mtdswap.c mtdswap_store_eb(d, eb); eb 1056 drivers/mtd/mtdswap.c eb = d->eb_data + (newblock / d->pages_per_eblk); eb 1085 drivers/mtd/mtdswap.c struct swap_eb *eb; eb 1107 drivers/mtd/mtdswap.c eb = d->eb_data + (realblock / d->pages_per_eblk); eb 1118 drivers/mtd/mtdswap.c eb->flags |= EBLOCK_BITFLIP; eb 1119 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); eb 1125 drivers/mtd/mtdswap.c eb->flags |= EBLOCK_READERR; eb 1126 drivers/mtd/mtdswap.c mtdswap_rb_add(d, eb, MTDSWAP_FAILING); eb 1147 drivers/mtd/mtdswap.c struct swap_eb *eb; eb 1155 drivers/mtd/mtdswap.c eb = d->eb_data + (mapped / d->pages_per_eblk); eb 1156 drivers/mtd/mtdswap.c eb->active_count--; eb 1157 drivers/mtd/mtdswap.c mtdswap_store_eb(d, eb); eb 267 drivers/mtd/nand/raw/marvell_nand.c #define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb) \ eb 276 drivers/mtd/nand/raw/marvell_nand.c .ecc_bytes = eb, \ eb 5465 drivers/mtd/nand/raw/nand_base.c unsigned int eb = nanddev_pos_to_row(nand, pos); eb 5468 drivers/mtd/nand/raw/nand_base.c eb >>= nand->rowconv.eraseblock_addr_shift; eb 5471 drivers/mtd/nand/raw/nand_base.c ret = nand_erase_op(chip, eb); eb 42 drivers/mtd/tests/mtd_test.c unsigned int eb, int ebcnt) eb 51 drivers/mtd/tests/mtd_test.c bbt[i] = is_block_bad(mtd, eb + i) ? 
1 : 0; eb 62 drivers/mtd/tests/mtd_test.c unsigned int eb, int ebcnt) eb 70 drivers/mtd/tests/mtd_test.c err = mtdtest_erase_eraseblock(mtd, eb + i); eb 18 drivers/mtd/tests/mtd_test.h unsigned int eb, int ebcnt); eb 20 drivers/mtd/tests/mtd_test.h unsigned int eb, int ebcnt); eb 45 drivers/mtd/tests/stresstest.c unsigned int eb; eb 48 drivers/mtd/tests/stresstest.c eb = prandom_u32(); eb 50 drivers/mtd/tests/stresstest.c eb %= (ebcnt - 1); eb 51 drivers/mtd/tests/stresstest.c if (bbt[eb]) eb 53 drivers/mtd/tests/stresstest.c return eb; eb 76 drivers/mtd/tests/stresstest.c int eb = rand_eb(); eb 81 drivers/mtd/tests/stresstest.c if (bbt[eb + 1]) { eb 87 drivers/mtd/tests/stresstest.c addr = (loff_t)eb * mtd->erasesize + offs; eb 93 drivers/mtd/tests/stresstest.c int eb = rand_eb(), offs, err, len; eb 96 drivers/mtd/tests/stresstest.c offs = offsets[eb]; eb 98 drivers/mtd/tests/stresstest.c err = mtdtest_erase_eraseblock(mtd, eb); eb 101 drivers/mtd/tests/stresstest.c offs = offsets[eb] = 0; eb 106 drivers/mtd/tests/stresstest.c if (bbt[eb + 1]) eb 109 drivers/mtd/tests/stresstest.c err = mtdtest_erase_eraseblock(mtd, eb + 1); eb 112 drivers/mtd/tests/stresstest.c offsets[eb + 1] = 0; eb 115 drivers/mtd/tests/stresstest.c addr = (loff_t)eb * mtd->erasesize + offs; eb 121 drivers/mtd/tests/stresstest.c offsets[eb++] = mtd->erasesize; eb 124 drivers/mtd/tests/stresstest.c offsets[eb] = offs; eb 28 drivers/mtd/tests/torturetest.c static int eb = 8; eb 29 drivers/mtd/tests/torturetest.c module_param(eb, int, S_IRUGO); eb 30 drivers/mtd/tests/torturetest.c MODULE_PARM_DESC(eb, "eraseblock number within the selected MTD device"); eb 187 drivers/mtd/tests/torturetest.c ebcnt, eb, eb + ebcnt - 1, dev); eb 247 drivers/mtd/tests/torturetest.c err = mtdtest_scan_for_bad_eraseblocks(mtd, bad_ebs, eb, ebcnt); eb 256 drivers/mtd/tests/torturetest.c err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt); eb 262 drivers/mtd/tests/torturetest.c for (i = eb; i < eb + ebcnt; i++) { eb 263 drivers/mtd/tests/torturetest.c if (bad_ebs[i - eb]) eb 279 drivers/mtd/tests/torturetest.c for (i = eb; i < eb + ebcnt; i++) { eb 280 drivers/mtd/tests/torturetest.c if (bad_ebs[i - eb]) eb 282 drivers/mtd/tests/torturetest.c if ((eb + erase_cycles) & 1) eb 297 drivers/mtd/tests/torturetest.c for (i = eb; i < eb + ebcnt; i++) { eb 298 drivers/mtd/tests/torturetest.c if (bad_ebs[i - eb]) eb 300 drivers/mtd/tests/torturetest.c if ((eb + erase_cycles) & 1) eb 308 drivers/mtd/tests/torturetest.c ((eb + erase_cycles) & 1) ? 
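The drivers/mtd/mtdswap.c hits above all orbit one data structure: each swap_eb sits in a red-black tree keyed by erase_count, so the wear-levelling code can take the least-erased block with rb_first() (see the __mtdswap_rb_add and mtdswap_pick_gc_eblk fragments). A minimal sketch of that insertion pattern, reconstructed from the quoted fragments against the kernel rb-tree API (the struct is trimmed to the fields the listing shows; this is an illustration, not the full driver):

#include <linux/rbtree.h>

struct swap_eb {
	struct rb_node rb;		/* node in one of the wear trees */
	struct rb_root *root;		/* tree currently holding this block */
	unsigned int erase_count;	/* sort key for wear levelling */
};

/* Insert ordered by erase_count; equal counts go left, matching the
 * comparison in the __mtdswap_rb_add() fragment quoted above. */
static void swap_eb_rb_add(struct rb_root *root, struct swap_eb *eb)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct swap_eb *cur;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct swap_eb, rb);
		if (eb->erase_count > cur->erase_count)
			p = &(*p)->rb_right;
		else
			p = &(*p)->rb_left;
	}

	rb_link_node(&eb->rb, parent, p);
	rb_insert_color(&eb->rb, root);
}

With this ordering, picking a victim block reduces to rb_first() plus rb_erase(), which is exactly the shape of the fragments at mtdswap.c lines 576-578 and 861-864 in the listing.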
eb 1719 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h #define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1) eb 1720 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h #define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb))) eb 1746 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h #define SHMEM_ARRAY_BITPOS(i, eb, fb) \ eb 1747 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \ eb 1748 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h (((i)%((fb)/(eb))) * (eb))) eb 1750 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h #define SHMEM_ARRAY_GET(a, i, eb, fb) \ eb 1751 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \ eb 1752 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h SHMEM_ARRAY_MASK(eb)) eb 1754 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h #define SHMEM_ARRAY_SET(a, i, eb, fb, val) \ eb 1756 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \ eb 1757 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h SHMEM_ARRAY_BITPOS(i, eb, fb)); \ eb 1758 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \ eb 1759 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h SHMEM_ARRAY_BITPOS(i, eb, fb)); \ eb 353 drivers/scsi/aic94xx/aic94xx_hwi.c struct sg_el *eb = &escb->eb[k]; eb 356 drivers/scsi/aic94xx/aic94xx_hwi.c memset(eb, 0, sizeof(*eb)); eb 357 drivers/scsi/aic94xx/aic94xx_hwi.c eb->bus_addr = cpu_to_le64(((u64) edb->dma_handle)); eb 358 drivers/scsi/aic94xx/aic94xx_hwi.c eb->size = cpu_to_le32(((u32) edb->size)); eb 425 drivers/scsi/aic94xx/aic94xx_sas.h struct sg_el eb[ASD_EDBS_PER_SCB]; eb 368 drivers/scsi/aic94xx/aic94xx_scb.c struct sg_el *eb = &escb->eb[edb_id]; eb 372 drivers/scsi/aic94xx/aic94xx_scb.c eb->flags |= ELEMENT_NOT_VALID; eb 388 drivers/scsi/aic94xx/aic94xx_scb.c escb->eb[i].flags = 0; eb 27 fs/btrfs/backref.c const struct extent_buffer *eb, eb 37 fs/btrfs/backref.c !btrfs_file_extent_compression(eb, fi) && eb 38 fs/btrfs/backref.c !btrfs_file_extent_encryption(eb, fi) && eb 39 fs/btrfs/backref.c !btrfs_file_extent_other_encoding(eb, fi)) { eb 43 fs/btrfs/backref.c data_offset = btrfs_file_extent_offset(eb, fi); eb 44 fs/btrfs/backref.c data_len = btrfs_file_extent_num_bytes(eb, fi); eb 74 fs/btrfs/backref.c static int find_extent_in_eb(const struct extent_buffer *eb, eb 92 fs/btrfs/backref.c nritems = btrfs_header_nritems(eb); eb 94 fs/btrfs/backref.c btrfs_item_key_to_cpu(eb, &key, slot); eb 97 fs/btrfs/backref.c fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); eb 98 fs/btrfs/backref.c extent_type = btrfs_file_extent_type(eb, fi); eb 102 fs/btrfs/backref.c disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); eb 106 fs/btrfs/backref.c ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset); eb 419 fs/btrfs/backref.c struct extent_buffer *eb; eb 429 fs/btrfs/backref.c eb = path->nodes[level]; eb 430 fs/btrfs/backref.c ret = ulist_add(parents, eb->start, 0, GFP_NOFS); eb 449 fs/btrfs/backref.c eb = path->nodes[0]; eb 452 fs/btrfs/backref.c btrfs_item_key_to_cpu(eb, &key, slot); eb 458 fs/btrfs/backref.c fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); eb 459 fs/btrfs/backref.c disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); eb 466 fs/btrfs/backref.c ret = check_extent_in_eb(&key, eb, fi, eb 474 fs/btrfs/backref.c ret = ulist_add_merge_ptr(parents, eb->start, eb 511 fs/btrfs/backref.c struct extent_buffer *eb; eb 567 
fs/btrfs/backref.c eb = path->nodes[level]; eb 568 fs/btrfs/backref.c while (!eb) { eb 574 fs/btrfs/backref.c eb = path->nodes[level]; eb 718 fs/btrfs/backref.c struct extent_buffer *eb; eb 730 fs/btrfs/backref.c eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0, eb 732 fs/btrfs/backref.c if (IS_ERR(eb)) { eb 734 fs/btrfs/backref.c return PTR_ERR(eb); eb 735 fs/btrfs/backref.c } else if (!extent_buffer_uptodate(eb)) { eb 737 fs/btrfs/backref.c free_extent_buffer(eb); eb 741 fs/btrfs/backref.c btrfs_tree_read_lock(eb); eb 742 fs/btrfs/backref.c if (btrfs_header_level(eb) == 0) eb 743 fs/btrfs/backref.c btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); eb 745 fs/btrfs/backref.c btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); eb 747 fs/btrfs/backref.c btrfs_tree_read_unlock(eb); eb 748 fs/btrfs/backref.c free_extent_buffer(eb); eb 1281 fs/btrfs/backref.c struct extent_buffer *eb; eb 1283 fs/btrfs/backref.c eb = read_tree_block(fs_info, ref->parent, 0, eb 1285 fs/btrfs/backref.c if (IS_ERR(eb)) { eb 1286 fs/btrfs/backref.c ret = PTR_ERR(eb); eb 1288 fs/btrfs/backref.c } else if (!extent_buffer_uptodate(eb)) { eb 1289 fs/btrfs/backref.c free_extent_buffer(eb); eb 1295 fs/btrfs/backref.c btrfs_tree_read_lock(eb); eb 1296 fs/btrfs/backref.c btrfs_set_lock_blocking_read(eb); eb 1298 fs/btrfs/backref.c ret = find_extent_in_eb(eb, bytenr, eb 1301 fs/btrfs/backref.c btrfs_tree_read_unlock_blocking(eb); eb 1302 fs/btrfs/backref.c free_extent_buffer(eb); eb 1621 fs/btrfs/backref.c struct extent_buffer *eb = eb_in; eb 1633 fs/btrfs/backref.c read_extent_buffer(eb, dest + bytes_left, eb 1635 fs/btrfs/backref.c if (eb != eb_in) { eb 1637 fs/btrfs/backref.c btrfs_tree_read_unlock_blocking(eb); eb 1638 fs/btrfs/backref.c free_extent_buffer(eb); eb 1654 fs/btrfs/backref.c eb = path->nodes[0]; eb 1656 fs/btrfs/backref.c if (eb != eb_in) { eb 1658 fs/btrfs/backref.c btrfs_set_lock_blocking_read(eb); eb 1663 fs/btrfs/backref.c iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); eb 1665 fs/btrfs/backref.c name_len = btrfs_inode_ref_name_len(eb, iref); eb 1696 fs/btrfs/backref.c const struct extent_buffer *eb; eb 1730 fs/btrfs/backref.c eb = path->nodes[0]; eb 1731 fs/btrfs/backref.c item_size = btrfs_item_size_nr(eb, path->slots[0]); eb 1734 fs/btrfs/backref.c ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); eb 1735 fs/btrfs/backref.c flags = btrfs_extent_flags(eb, ei); eb 1765 fs/btrfs/backref.c const struct extent_buffer *eb, eb 1778 fs/btrfs/backref.c flags = btrfs_extent_flags(eb, ei); eb 1800 fs/btrfs/backref.c *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref, eb 1820 fs/btrfs/backref.c int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, eb 1832 fs/btrfs/backref.c ret = get_extent_inline_ref(ptr, eb, key, ei, item_size, eb 1846 fs/btrfs/backref.c *out_root = btrfs_extent_inline_ref_offset(eb, eiref); eb 1852 fs/btrfs/backref.c *out_level = btrfs_tree_block_level(eb, info); eb 1996 fs/btrfs/backref.c struct extent_buffer *eb, void *ctx); eb 2009 fs/btrfs/backref.c struct extent_buffer *eb; eb 2029 fs/btrfs/backref.c eb = btrfs_clone_extent_buffer(path->nodes[0]); eb 2030 fs/btrfs/backref.c if (!eb) { eb 2037 fs/btrfs/backref.c iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); eb 2039 fs/btrfs/backref.c for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) { eb 2040 fs/btrfs/backref.c name_len = btrfs_inode_ref_name_len(eb, iref); eb 2047 fs/btrfs/backref.c (unsigned long)(iref + 1), eb, ctx); eb 2053 fs/btrfs/backref.c 
free_extent_buffer(eb); eb 2070 fs/btrfs/backref.c struct extent_buffer *eb; eb 2088 fs/btrfs/backref.c eb = btrfs_clone_extent_buffer(path->nodes[0]); eb 2089 fs/btrfs/backref.c if (!eb) { eb 2095 fs/btrfs/backref.c item_size = btrfs_item_size_nr(eb, slot); eb 2096 fs/btrfs/backref.c ptr = btrfs_item_ptr_offset(eb, slot); eb 2103 fs/btrfs/backref.c parent = btrfs_inode_extref_parent(eb, extref); eb 2104 fs/btrfs/backref.c name_len = btrfs_inode_extref_name_len(eb, extref); eb 2106 fs/btrfs/backref.c (unsigned long)&extref->name, eb, ctx); eb 2110 fs/btrfs/backref.c cur_offset += btrfs_inode_extref_name_len(eb, extref); eb 2113 fs/btrfs/backref.c free_extent_buffer(eb); eb 2148 fs/btrfs/backref.c struct extent_buffer *eb, void *ctx) eb 2162 fs/btrfs/backref.c name_off, eb, inum, fspath_min, bytes_left); eb 26 fs/btrfs/backref.h int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, eb 128 fs/btrfs/ctree.c struct extent_buffer *eb; eb 132 fs/btrfs/ctree.c eb = rcu_dereference(root->node); eb 140 fs/btrfs/ctree.c if (atomic_inc_not_zero(&eb->refs)) { eb 147 fs/btrfs/ctree.c return eb; eb 156 fs/btrfs/ctree.c struct extent_buffer *eb; eb 159 fs/btrfs/ctree.c eb = btrfs_root_node(root); eb 160 fs/btrfs/ctree.c btrfs_tree_lock(eb); eb 161 fs/btrfs/ctree.c if (eb == root->node) eb 163 fs/btrfs/ctree.c btrfs_tree_unlock(eb); eb 164 fs/btrfs/ctree.c free_extent_buffer(eb); eb 166 fs/btrfs/ctree.c return eb; eb 175 fs/btrfs/ctree.c struct extent_buffer *eb; eb 178 fs/btrfs/ctree.c eb = btrfs_root_node(root); eb 179 fs/btrfs/ctree.c btrfs_tree_read_lock(eb); eb 180 fs/btrfs/ctree.c if (eb == root->node) eb 182 fs/btrfs/ctree.c btrfs_tree_read_unlock(eb); eb 183 fs/btrfs/ctree.c free_extent_buffer(eb); eb 185 fs/btrfs/ctree.c return eb; eb 439 fs/btrfs/ctree.c struct extent_buffer *eb) { eb 443 fs/btrfs/ctree.c if (eb && btrfs_header_level(eb) == 0) eb 457 fs/btrfs/ctree.c struct extent_buffer *eb) eb 462 fs/btrfs/ctree.c if (eb && btrfs_header_level(eb) == 0) eb 469 fs/btrfs/ctree.c alloc_tree_mod_elem(struct extent_buffer *eb, int slot, eb 478 fs/btrfs/ctree.c tm->logical = eb->start; eb 480 fs/btrfs/ctree.c btrfs_node_key(eb, &tm->key, slot); eb 481 fs/btrfs/ctree.c tm->blockptr = btrfs_node_blockptr(eb, slot); eb 485 fs/btrfs/ctree.c tm->generation = btrfs_node_ptr_generation(eb, slot); eb 491 fs/btrfs/ctree.c static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot, eb 497 fs/btrfs/ctree.c if (!tree_mod_need_log(eb->fs_info, eb)) eb 500 fs/btrfs/ctree.c tm = alloc_tree_mod_elem(eb, slot, op, flags); eb 504 fs/btrfs/ctree.c if (tree_mod_dont_log(eb->fs_info, eb)) { eb 509 fs/btrfs/ctree.c ret = __tree_mod_log_insert(eb->fs_info, tm); eb 510 fs/btrfs/ctree.c write_unlock(&eb->fs_info->tree_mod_log_lock); eb 517 fs/btrfs/ctree.c static noinline int tree_mod_log_insert_move(struct extent_buffer *eb, eb 526 fs/btrfs/ctree.c if (!tree_mod_need_log(eb->fs_info, eb)) eb 539 fs/btrfs/ctree.c tm->logical = eb->start; eb 546 fs/btrfs/ctree.c tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot, eb 554 fs/btrfs/ctree.c if (tree_mod_dont_log(eb->fs_info, eb)) eb 564 fs/btrfs/ctree.c ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]); eb 569 fs/btrfs/ctree.c ret = __tree_mod_log_insert(eb->fs_info, tm); eb 572 fs/btrfs/ctree.c write_unlock(&eb->fs_info->tree_mod_log_lock); eb 579 fs/btrfs/ctree.c rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log); eb 583 fs/btrfs/ctree.c write_unlock(&eb->fs_info->tree_mod_log_lock); eb 816 fs/btrfs/ctree.c static noinline int 
tree_mod_log_free_eb(struct extent_buffer *eb) eb 823 fs/btrfs/ctree.c if (btrfs_header_level(eb) == 0) eb 826 fs/btrfs/ctree.c if (!tree_mod_need_log(eb->fs_info, NULL)) eb 829 fs/btrfs/ctree.c nritems = btrfs_header_nritems(eb); eb 835 fs/btrfs/ctree.c tm_list[i] = alloc_tree_mod_elem(eb, i, eb 843 fs/btrfs/ctree.c if (tree_mod_dont_log(eb->fs_info, eb)) eb 846 fs/btrfs/ctree.c ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems); eb 847 fs/btrfs/ctree.c write_unlock(&eb->fs_info->tree_mod_log_lock); eb 1220 fs/btrfs/ctree.c __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, eb 1230 fs/btrfs/ctree.c n = btrfs_header_nritems(eb); eb 1244 fs/btrfs/ctree.c btrfs_set_node_key(eb, &tm->key, tm->slot); eb 1245 fs/btrfs/ctree.c btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); eb 1246 fs/btrfs/ctree.c btrfs_set_node_ptr_generation(eb, tm->slot, eb 1252 fs/btrfs/ctree.c btrfs_set_node_key(eb, &tm->key, tm->slot); eb 1253 fs/btrfs/ctree.c btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); eb 1254 fs/btrfs/ctree.c btrfs_set_node_ptr_generation(eb, tm->slot, eb 1264 fs/btrfs/ctree.c memmove_extent_buffer(eb, o_dst, o_src, eb 1287 fs/btrfs/ctree.c btrfs_set_header_nritems(eb, n); eb 1299 fs/btrfs/ctree.c struct extent_buffer *eb, u64 time_seq) eb 1305 fs/btrfs/ctree.c return eb; eb 1307 fs/btrfs/ctree.c if (btrfs_header_level(eb) == 0) eb 1308 fs/btrfs/ctree.c return eb; eb 1310 fs/btrfs/ctree.c tm = tree_mod_log_search(fs_info, eb->start, time_seq); eb 1312 fs/btrfs/ctree.c return eb; eb 1315 fs/btrfs/ctree.c btrfs_set_lock_blocking_read(eb); eb 1319 fs/btrfs/ctree.c eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); eb 1321 fs/btrfs/ctree.c btrfs_tree_read_unlock_blocking(eb); eb 1322 fs/btrfs/ctree.c free_extent_buffer(eb); eb 1325 fs/btrfs/ctree.c btrfs_set_header_bytenr(eb_rewin, eb->start); eb 1327 fs/btrfs/ctree.c btrfs_header_backref_rev(eb)); eb 1328 fs/btrfs/ctree.c btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb)); eb 1329 fs/btrfs/ctree.c btrfs_set_header_level(eb_rewin, btrfs_header_level(eb)); eb 1331 fs/btrfs/ctree.c eb_rewin = btrfs_clone_extent_buffer(eb); eb 1333 fs/btrfs/ctree.c btrfs_tree_read_unlock_blocking(eb); eb 1334 fs/btrfs/ctree.c free_extent_buffer(eb); eb 1339 fs/btrfs/ctree.c btrfs_tree_read_unlock_blocking(eb); eb 1340 fs/btrfs/ctree.c free_extent_buffer(eb); eb 1362 fs/btrfs/ctree.c struct extent_buffer *eb = NULL; eb 1398 fs/btrfs/ctree.c eb = btrfs_clone_extent_buffer(old); eb 1405 fs/btrfs/ctree.c eb = alloc_dummy_extent_buffer(fs_info, logical); eb 1408 fs/btrfs/ctree.c eb = btrfs_clone_extent_buffer(eb_root); eb 1413 fs/btrfs/ctree.c if (!eb) eb 1415 fs/btrfs/ctree.c btrfs_tree_read_lock(eb); eb 1417 fs/btrfs/ctree.c btrfs_set_header_bytenr(eb, eb->start); eb 1418 fs/btrfs/ctree.c btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); eb 1419 fs/btrfs/ctree.c btrfs_set_header_owner(eb, eb_root_owner); eb 1420 fs/btrfs/ctree.c btrfs_set_header_level(eb, old_root->level); eb 1421 fs/btrfs/ctree.c btrfs_set_header_generation(eb, old_generation); eb 1424 fs/btrfs/ctree.c __tree_mod_log_rewind(fs_info, eb, time_seq, tm); eb 1426 fs/btrfs/ctree.c WARN_ON(btrfs_header_level(eb) != 0); eb 1427 fs/btrfs/ctree.c WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info)); eb 1429 fs/btrfs/ctree.c return eb; eb 1706 fs/btrfs/ctree.c static noinline int generic_bin_search(struct extent_buffer *eb, eb 1724 fs/btrfs/ctree.c btrfs_err(eb->fs_info, eb 1726 fs/btrfs/ctree.c __func__, low, high, eb->start, eb 
eb 1727 fs/btrfs/ctree.c btrfs_header_owner(eb), btrfs_header_level(eb));
eb 1739 fs/btrfs/ctree.c err = map_private_extent_buffer(eb, offset,
eb 1747 fs/btrfs/ctree.c read_extent_buffer(eb, &unaligned,
eb 1777 fs/btrfs/ctree.c int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
eb 1781 fs/btrfs/ctree.c return generic_bin_search(eb,
eb 1784 fs/btrfs/ctree.c key, btrfs_header_nritems(eb),
eb 1787 fs/btrfs/ctree.c return generic_bin_search(eb,
eb 1790 fs/btrfs/ctree.c key, btrfs_header_nritems(eb),
eb 1817 fs/btrfs/ctree.c struct extent_buffer *eb;
eb 1826 fs/btrfs/ctree.c eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
eb 1829 fs/btrfs/ctree.c if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
eb 1830 fs/btrfs/ctree.c free_extent_buffer(eb);
eb 1831 fs/btrfs/ctree.c eb = ERR_PTR(-EIO);
eb 1834 fs/btrfs/ctree.c return eb;
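read_node_slot() above ends with the same convention used by the qgroup, ref-verify and reada walkers later in this listing: read_tree_block() may return a buffer that was allocated but never became uptodate, and that case has to be turned into -EIO by the caller. A condensed sketch of the pattern (kernel context assumed; bytenr, gen, level and first_key stand for the per-slot values read from the parent node):

        struct extent_buffer *eb;

        eb = read_tree_block(fs_info, bytenr, gen, level, &first_key);
        if (IS_ERR(eb))
                return PTR_ERR(eb);
        if (!extent_buffer_uptodate(eb)) {
                /* allocated, but the read failed or was aborted */
                free_extent_buffer(eb);
                return -EIO;
        }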
eb 2221 fs/btrfs/ctree.c struct extent_buffer *eb;
eb 2236 fs/btrfs/ctree.c eb = find_extent_buffer(fs_info, search);
eb 2237 fs/btrfs/ctree.c if (eb) {
eb 2238 fs/btrfs/ctree.c free_extent_buffer(eb);
eb 2280 fs/btrfs/ctree.c struct extent_buffer *eb;
eb 2295 fs/btrfs/ctree.c eb = find_extent_buffer(fs_info, block1);
eb 2301 fs/btrfs/ctree.c if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
eb 2303 fs/btrfs/ctree.c free_extent_buffer(eb);
eb 2308 fs/btrfs/ctree.c eb = find_extent_buffer(fs_info, block2);
eb 2309 fs/btrfs/ctree.c if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
eb 2311 fs/btrfs/ctree.c free_extent_buffer(eb);
eb 2591 fs/btrfs/ctree.c struct extent_buffer *eb;
eb 2604 fs/btrfs/ctree.c eb = path->nodes[0];
eb 2605 fs/btrfs/ctree.c if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
eb 2609 fs/btrfs/ctree.c eb = path->nodes[0];
eb 2612 fs/btrfs/ctree.c btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
eb 3198 fs/btrfs/ctree.c struct extent_buffer *eb;
eb 3201 fs/btrfs/ctree.c eb = path->nodes[0];
eb 3204 fs/btrfs/ctree.c btrfs_item_key(eb, &disk_key, slot - 1);
eb 3213 fs/btrfs/ctree.c btrfs_print_leaf(eb);
eb 3217 fs/btrfs/ctree.c if (slot < btrfs_header_nritems(eb) - 1) {
eb 3218 fs/btrfs/ctree.c btrfs_item_key(eb, &disk_key, slot + 1);
eb 3227 fs/btrfs/ctree.c btrfs_print_leaf(eb);
eb 3233 fs/btrfs/ctree.c btrfs_set_item_key(eb, &disk_key, slot);
eb 3234 fs/btrfs/ctree.c btrfs_mark_buffer_dirty(eb);
eb 1290 fs/btrfs/ctree.h const struct extent_buffer *eb;
eb 1299 fs/btrfs/ctree.h struct extent_buffer *eb)
eb 1301 fs/btrfs/ctree.h token->eb = eb;
eb 1313 fs/btrfs/ctree.h #define read_eb_member(eb, ptr, type, member, result) (\
eb 1314 fs/btrfs/ctree.h read_extent_buffer(eb, (char *)(result), \
eb 1319 fs/btrfs/ctree.h #define write_eb_member(eb, ptr, type, member, result) (\
eb 1320 fs/btrfs/ctree.h write_extent_buffer(eb, (char *)(result), \
eb 1326 fs/btrfs/ctree.h u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
eb 1329 fs/btrfs/ctree.h void btrfs_set_token_##bits(struct extent_buffer *eb, const void *ptr, \
eb 1332 fs/btrfs/ctree.h u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
eb 1334 fs/btrfs/ctree.h void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
eb 1343 fs/btrfs/ctree.h static inline u##bits btrfs_##name(const struct extent_buffer *eb, \
eb 1347 fs/btrfs/ctree.h return btrfs_get_##bits(eb, s, offsetof(type, member)); \
eb 1349 fs/btrfs/ctree.h static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \
eb 1353 fs/btrfs/ctree.h btrfs_set_##bits(eb, s, offsetof(type, member), val); \
eb 1355 fs/btrfs/ctree.h static inline u##bits btrfs_token_##name(const struct extent_buffer *eb,\
eb 1360 fs/btrfs/ctree.h return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \
eb 1362 fs/btrfs/ctree.h static inline void btrfs_set_token_##name(struct extent_buffer *eb, \
eb 1367 fs/btrfs/ctree.h btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \
eb 1371 fs/btrfs/ctree.h static inline u##bits btrfs_##name(const struct extent_buffer *eb) \
eb 1373 fs/btrfs/ctree.h const type *p = page_address(eb->pages[0]); \
eb 1377 fs/btrfs/ctree.h static inline void btrfs_set_##name(struct extent_buffer *eb, \
eb 1380 fs/btrfs/ctree.h type *p = page_address(eb->pages[0]); \
eb 1395 fs/btrfs/ctree.h static inline u64 btrfs_device_total_bytes(struct extent_buffer *eb,
eb 1400 fs/btrfs/ctree.h return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item,
eb 1403 fs/btrfs/ctree.h static inline void btrfs_set_device_total_bytes(struct extent_buffer *eb,
eb 1409 fs/btrfs/ctree.h WARN_ON(!IS_ALIGNED(val, eb->fs_info->sectorsize));
eb 1410 fs/btrfs/ctree.h btrfs_set_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes), val);
eb 1507 fs/btrfs/ctree.h static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb,
eb 1510 fs/btrfs/ctree.h return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
eb 1513 fs/btrfs/ctree.h static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
eb 1516 fs/btrfs/ctree.h return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
eb 1610 fs/btrfs/ctree.h static inline void btrfs_tree_block_key(struct extent_buffer *eb,
eb 1614 fs/btrfs/ctree.h read_eb_member(eb, item, struct btrfs_tree_block_info, key, key);
eb 1617 fs/btrfs/ctree.h static inline void btrfs_set_tree_block_key(struct extent_buffer *eb,
eb 1621 fs/btrfs/ctree.h write_eb_member(eb, item, struct btrfs_tree_block_info, key, key);
eb 1669 fs/btrfs/ctree.h static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr)
eb 1674 fs/btrfs/ctree.h return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr);
eb 1677 fs/btrfs/ctree.h static inline void btrfs_set_node_blockptr(struct extent_buffer *eb,
eb 1683 fs/btrfs/ctree.h btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val);
eb 1686 fs/btrfs/ctree.h static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr)
eb 1691 fs/btrfs/ctree.h return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr);
eb 1694 fs/btrfs/ctree.h static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb,
eb 1700 fs/btrfs/ctree.h btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val);
eb 1709 fs/btrfs/ctree.h void btrfs_node_key(const struct extent_buffer *eb,
eb 1712 fs/btrfs/ctree.h static inline void btrfs_set_node_key(struct extent_buffer *eb,
eb 1717 fs/btrfs/ctree.h write_eb_member(eb, (struct btrfs_key_ptr *)ptr,
eb 1738 fs/btrfs/ctree.h static inline u32 btrfs_item_end(const struct extent_buffer *eb,
eb 1741 fs/btrfs/ctree.h return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item);
eb 1744 fs/btrfs/ctree.h static inline u32 btrfs_item_end_nr(const struct extent_buffer *eb, int nr)
eb 1746 fs/btrfs/ctree.h return btrfs_item_end(eb, btrfs_item_nr(nr));
eb 1749 fs/btrfs/ctree.h static inline u32 btrfs_item_offset_nr(const struct extent_buffer *eb, int nr)
eb 1751 fs/btrfs/ctree.h return btrfs_item_offset(eb, btrfs_item_nr(nr));
eb 1754 fs/btrfs/ctree.h static inline u32 btrfs_item_size_nr(const struct extent_buffer *eb, int nr)
eb 1756 fs/btrfs/ctree.h return btrfs_item_size(eb, btrfs_item_nr(nr));
eb 1759 fs/btrfs/ctree.h static inline void btrfs_item_key(const struct extent_buffer *eb,
eb 1763 fs/btrfs/ctree.h read_eb_member(eb, item, struct btrfs_item, key, disk_key);
eb 1766 fs/btrfs/ctree.h static inline void btrfs_set_item_key(struct extent_buffer *eb,
eb 1770 fs/btrfs/ctree.h write_eb_member(eb, item, struct btrfs_item, key, disk_key);
eb 1795 fs/btrfs/ctree.h static inline void btrfs_dir_item_key(const struct extent_buffer *eb,
eb 1799 fs/btrfs/ctree.h read_eb_member(eb, item, struct btrfs_dir_item, location, key);
eb 1802 fs/btrfs/ctree.h static inline void btrfs_set_dir_item_key(struct extent_buffer *eb,
eb 1806 fs/btrfs/ctree.h write_eb_member(eb, item, struct btrfs_dir_item, location, key);
eb 1816 fs/btrfs/ctree.h static inline void btrfs_free_space_key(const struct extent_buffer *eb,
eb 1820 fs/btrfs/ctree.h read_eb_member(eb, h, struct btrfs_free_space_header, location, key);
eb 1823 fs/btrfs/ctree.h static inline void btrfs_set_free_space_key(struct extent_buffer *eb,
eb 1827 fs/btrfs/ctree.h write_eb_member(eb, h, struct btrfs_free_space_header, location, key);
eb 1852 fs/btrfs/ctree.h static inline void btrfs_node_key_to_cpu(const struct extent_buffer *eb,
eb 1856 fs/btrfs/ctree.h btrfs_node_key(eb, &disk_key, nr);
eb 1860 fs/btrfs/ctree.h static inline void btrfs_item_key_to_cpu(const struct extent_buffer *eb,
eb 1864 fs/btrfs/ctree.h btrfs_item_key(eb, &disk_key, nr);
eb 1868 fs/btrfs/ctree.h static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb,
eb 1873 fs/btrfs/ctree.h btrfs_dir_item_key(eb, item, &disk_key);
eb 1892 fs/btrfs/ctree.h static inline int btrfs_header_flag(const struct extent_buffer *eb, u64 flag)
eb 1894 fs/btrfs/ctree.h return (btrfs_header_flags(eb) & flag) == flag;
eb 1897 fs/btrfs/ctree.h static inline void btrfs_set_header_flag(struct extent_buffer *eb, u64 flag)
eb 1899 fs/btrfs/ctree.h u64 flags = btrfs_header_flags(eb);
eb 1900 fs/btrfs/ctree.h btrfs_set_header_flags(eb, flags | flag);
eb 1903 fs/btrfs/ctree.h static inline void btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag)
eb 1905 fs/btrfs/ctree.h u64 flags = btrfs_header_flags(eb);
eb 1906 fs/btrfs/ctree.h btrfs_set_header_flags(eb, flags & ~flag);
eb 1909 fs/btrfs/ctree.h static inline int btrfs_header_backref_rev(const struct extent_buffer *eb)
eb 1911 fs/btrfs/ctree.h u64 flags = btrfs_header_flags(eb);
eb 1915 fs/btrfs/ctree.h static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb,
eb 1918 fs/btrfs/ctree.h u64 flags = btrfs_header_flags(eb);
eb 1921 fs/btrfs/ctree.h btrfs_set_header_flags(eb, flags);
eb 1929 fs/btrfs/ctree.h static inline unsigned long btrfs_header_chunk_tree_uuid(const struct extent_buffer *eb)
eb 1934 fs/btrfs/ctree.h static inline int btrfs_is_leaf(const struct extent_buffer *eb)
eb 1936 fs/btrfs/ctree.h return btrfs_header_level(eb) == 0;
eb 2030 fs/btrfs/ctree.h static inline void btrfs_balance_data(const struct extent_buffer *eb,
eb 2034 fs/btrfs/ctree.h read_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
eb 2037 fs/btrfs/ctree.h static inline void btrfs_set_balance_data(struct extent_buffer *eb,
eb 2041 fs/btrfs/ctree.h write_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
eb 2044 fs/btrfs/ctree.h static inline void btrfs_balance_meta(const struct extent_buffer *eb,
eb 2048 fs/btrfs/ctree.h read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
eb 2051 fs/btrfs/ctree.h static inline void btrfs_set_balance_meta(struct extent_buffer *eb,
eb 2055 fs/btrfs/ctree.h write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
eb 2058 fs/btrfs/ctree.h static inline void btrfs_balance_sys(const struct extent_buffer *eb,
eb 2062 fs/btrfs/ctree.h read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
eb 2065 fs/btrfs/ctree.h static inline void btrfs_set_balance_sys(struct extent_buffer *eb,
eb 2069 fs/btrfs/ctree.h write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
eb 2230 fs/btrfs/ctree.h const struct extent_buffer *eb,
eb 2233 fs/btrfs/ctree.h return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
eb 2369 fs/btrfs/ctree.h int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
eb 2412 fs/btrfs/ctree.h int btrfs_exclude_logged_extents(struct extent_buffer *eb);
eb 2506 fs/btrfs/ctree.h int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
eb 3397 fs/btrfs/ctree.h int btree_readahead_hook(struct extent_buffer *eb, int err);
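The fs/btrfs/ctree.h block above is the accessor layer: read_eb_member()/write_eb_member() plus the macro families around btrfs_get_##bits()/btrfs_set_##bits() generate typed getters and setters that copy fields out of, or into, the paged extent buffer rather than dereferencing an on-disk struct directly. Typical leaf-side use, mirroring the qgroup.c and extent-tree.c hits elsewhere in this listing (sketch; leaf and slot are assumed valid):

        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        u64 bytenr;

        btrfs_item_key_to_cpu(leaf, &key, slot);            /* disk key -> CPU key */
        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);   /* generated getter */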
eb 38 fs/btrfs/dev-replace.c struct extent_buffer *eb;
eb 78 fs/btrfs/dev-replace.c eb = path->nodes[0];
eb 79 fs/btrfs/dev-replace.c item_size = btrfs_item_size_nr(eb, slot);
eb 80 fs/btrfs/dev-replace.c ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_replace_item);
eb 88 fs/btrfs/dev-replace.c src_devid = btrfs_dev_replace_src_devid(eb, ptr);
eb 90 fs/btrfs/dev-replace.c btrfs_dev_replace_cont_reading_from_srcdev_mode(eb, ptr);
eb 91 fs/btrfs/dev-replace.c dev_replace->replace_state = btrfs_dev_replace_replace_state(eb, ptr);
eb 92 fs/btrfs/dev-replace.c dev_replace->time_started = btrfs_dev_replace_time_started(eb, ptr);
eb 94 fs/btrfs/dev-replace.c btrfs_dev_replace_time_stopped(eb, ptr);
eb 96 fs/btrfs/dev-replace.c btrfs_dev_replace_num_write_errors(eb, ptr));
eb 98 fs/btrfs/dev-replace.c btrfs_dev_replace_num_uncorrectable_read_errors(eb, ptr));
eb 99 fs/btrfs/dev-replace.c dev_replace->cursor_left = btrfs_dev_replace_cursor_left(eb, ptr);
eb 102 fs/btrfs/dev-replace.c dev_replace->cursor_right = btrfs_dev_replace_cursor_right(eb, ptr);
eb 285 fs/btrfs/dev-replace.c struct extent_buffer *eb;
eb 349 fs/btrfs/dev-replace.c eb = path->nodes[0];
eb 350 fs/btrfs/dev-replace.c ptr = btrfs_item_ptr(eb, path->slots[0],
eb 355 fs/btrfs/dev-replace.c btrfs_set_dev_replace_src_devid(eb, ptr,
eb 358 fs/btrfs/dev-replace.c btrfs_set_dev_replace_src_devid(eb, ptr, (u64)-1);
eb 359 fs/btrfs/dev-replace.c btrfs_set_dev_replace_cont_reading_from_srcdev_mode(eb, ptr,
eb 361 fs/btrfs/dev-replace.c btrfs_set_dev_replace_replace_state(eb, ptr,
eb 363 fs/btrfs/dev-replace.c btrfs_set_dev_replace_time_started(eb, ptr, dev_replace->time_started);
eb 364 fs/btrfs/dev-replace.c btrfs_set_dev_replace_time_stopped(eb, ptr, dev_replace->time_stopped);
eb 365 fs/btrfs/dev-replace.c btrfs_set_dev_replace_num_write_errors(eb, ptr,
eb 367 fs/btrfs/dev-replace.c btrfs_set_dev_replace_num_uncorrectable_read_errors(eb, ptr,
eb 371 fs/btrfs/dev-replace.c btrfs_set_dev_replace_cursor_left(eb, ptr,
eb 373 fs/btrfs/dev-replace.c btrfs_set_dev_replace_cursor_right(eb, ptr,
eb 378 fs/btrfs/dev-replace.c btrfs_mark_buffer_dirty(eb);
eb 182 fs/btrfs/disk-io.c void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
eb 194 fs/btrfs/disk-io.c lockdep_set_class_and_name(&eb->lock,
eb 302 fs/btrfs/disk-io.c struct extent_buffer *eb, u64 parent_transid,
eb 309 fs/btrfs/disk-io.c if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
eb 316 fs/btrfs/disk-io.c btrfs_tree_read_lock(eb);
eb 317 fs/btrfs/disk-io.c btrfs_set_lock_blocking_read(eb);
eb 320 fs/btrfs/disk-io.c lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
eb 322 fs/btrfs/disk-io.c if (extent_buffer_uptodate(eb) &&
eb 323 fs/btrfs/disk-io.c btrfs_header_generation(eb) == parent_transid) {
eb 327 fs/btrfs/disk-io.c btrfs_err_rl(eb->fs_info,
eb 329 fs/btrfs/disk-io.c eb->start,
eb 330 fs/btrfs/disk-io.c parent_transid, btrfs_header_generation(eb));
eb 341 fs/btrfs/disk-io.c if (!extent_buffer_under_io(eb))
eb 342 fs/btrfs/disk-io.c clear_extent_buffer_uptodate(eb);
eb 344 fs/btrfs/disk-io.c unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
eb 347 fs/btrfs/disk-io.c btrfs_tree_read_unlock_blocking(eb);
eb 391 fs/btrfs/disk-io.c int btrfs_verify_level_key(struct extent_buffer *eb, int level,
eb 394 fs/btrfs/disk-io.c struct btrfs_fs_info *fs_info = eb->fs_info;
eb 399 fs/btrfs/disk-io.c found_level = btrfs_header_level(eb);
eb 405 fs/btrfs/disk-io.c eb->start, level, found_level);
eb 418 fs/btrfs/disk-io.c if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
eb 422 fs/btrfs/disk-io.c if (btrfs_header_nritems(eb) == 0) {
eb 425 fs/btrfs/disk-io.c eb->start);
eb 431 fs/btrfs/disk-io.c btrfs_node_key_to_cpu(eb, &found_key, 0);
eb 433 fs/btrfs/disk-io.c btrfs_item_key_to_cpu(eb, &found_key, 0);
eb 441 fs/btrfs/disk-io.c eb->start, parent_transid, first_key->objectid,
eb 457 fs/btrfs/disk-io.c static int btree_read_extent_buffer_pages(struct extent_buffer *eb,
eb 461 fs/btrfs/disk-io.c struct btrfs_fs_info *fs_info = eb->fs_info;
eb 471 fs/btrfs/disk-io.c clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
eb 472 fs/btrfs/disk-io.c ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
eb 474 fs/btrfs/disk-io.c if (verify_parent_transid(io_tree, eb,
eb 477 fs/btrfs/disk-io.c else if (btrfs_verify_level_key(eb, level,
eb 485 fs/btrfs/disk-io.c eb->start, eb->len);
eb 491 fs/btrfs/disk-io.c failed_mirror = eb->read_mirror;
eb 503 fs/btrfs/disk-io.c btrfs_repair_eb_io_failure(eb, failed_mirror);
eb 519 fs/btrfs/disk-io.c struct extent_buffer *eb;
eb 522 fs/btrfs/disk-io.c eb = (struct extent_buffer *)page->private;
eb 523 fs/btrfs/disk-io.c if (page != eb->pages[0])
eb 526 fs/btrfs/disk-io.c found_start = btrfs_header_bytenr(eb);
eb 536 fs/btrfs/disk-io.c ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
eb 539 fs/btrfs/disk-io.c if (csum_tree_block(eb, result))
eb 542 fs/btrfs/disk-io.c if (btrfs_header_level(eb))
eb 543 fs/btrfs/disk-io.c ret = btrfs_check_node(eb);
eb 545 fs/btrfs/disk-io.c ret = btrfs_check_leaf_full(eb);
eb 550 fs/btrfs/disk-io.c eb->start);
eb 553 fs/btrfs/disk-io.c write_extent_buffer(eb, result, 0, csum_size);
eb 558 fs/btrfs/disk-io.c static int check_tree_block_fsid(struct extent_buffer *eb)
eb 560 fs/btrfs/disk-io.c struct btrfs_fs_info *fs_info = eb->fs_info;
eb 565 fs/btrfs/disk-io.c read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
eb 595 fs/btrfs/disk-io.c struct extent_buffer *eb;
eb 606 fs/btrfs/disk-io.c eb = (struct extent_buffer *)page->private;
eb 611 fs/btrfs/disk-io.c extent_buffer_get(eb);
eb 613 fs/btrfs/disk-io.c reads_done = atomic_dec_and_test(&eb->io_pages);
eb 617 fs/btrfs/disk-io.c eb->read_mirror = mirror;
eb 618 fs/btrfs/disk-io.c if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) {
eb 623 fs/btrfs/disk-io.c found_start = btrfs_header_bytenr(eb);
eb 624 fs/btrfs/disk-io.c if (found_start != eb->start) {
eb 626 fs/btrfs/disk-io.c eb->start, found_start);
eb 630 fs/btrfs/disk-io.c if (check_tree_block_fsid(eb)) {
eb 632 fs/btrfs/disk-io.c eb->start);
eb 636 fs/btrfs/disk-io.c found_level = btrfs_header_level(eb);
eb 639 fs/btrfs/disk-io.c (int)btrfs_header_level(eb), eb->start);
eb 644 fs/btrfs/disk-io.c btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
eb 645 fs/btrfs/disk-io.c eb, found_level);
eb 647 fs/btrfs/disk-io.c ret = csum_tree_block(eb, result);
eb 651 fs/btrfs/disk-io.c if (memcmp_extent_buffer(eb, result, 0, csum_size)) {
eb 657 fs/btrfs/disk-io.c read_extent_buffer(eb, &val, 0, csum_size);
eb 660 fs/btrfs/disk-io.c fs_info->sb->s_id, eb->start,
eb 661 fs/btrfs/disk-io.c val, found, btrfs_header_level(eb));
eb 671 fs/btrfs/disk-io.c if (found_level == 0 && btrfs_check_leaf_full(eb)) {
eb 672 fs/btrfs/disk-io.c set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
eb 676 fs/btrfs/disk-io.c if (found_level > 0 && btrfs_check_node(eb))
eb 680 fs/btrfs/disk-io.c set_extent_buffer_uptodate(eb);
eb 684 fs/btrfs/disk-io.c eb->start);
eb 687 fs/btrfs/disk-io.c test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
eb 688 fs/btrfs/disk-io.c btree_readahead_hook(eb, ret);
eb 696 fs/btrfs/disk-io.c atomic_inc(&eb->io_pages);
eb 697 fs/btrfs/disk-io.c clear_extent_buffer_uptodate(eb);
eb 699 fs/btrfs/disk-io.c free_extent_buffer(eb);
eb 1000 fs/btrfs/disk-io.c struct extent_buffer *eb;
eb 1003 fs/btrfs/disk-io.c eb = (struct extent_buffer *)page->private;
eb 1004 fs/btrfs/disk-io.c BUG_ON(!eb);
eb 1005 fs/btrfs/disk-io.c BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
eb 1006 fs/btrfs/disk-io.c BUG_ON(!atomic_read(&eb->refs));
eb 1007 fs/btrfs/disk-io.c btrfs_assert_tree_locked(eb);
eb 4387 fs/btrfs/disk-io.c struct extent_buffer *eb;
eb 4399 fs/btrfs/disk-io.c eb = find_extent_buffer(fs_info, start);
eb 4401 fs/btrfs/disk-io.c if (!eb)
eb 4403 fs/btrfs/disk-io.c wait_on_extent_buffer_writeback(eb);
eb 4406 fs/btrfs/disk-io.c &eb->bflags))
eb 4407 fs/btrfs/disk-io.c clear_extent_buffer_dirty(eb);
eb 4408 fs/btrfs/disk-io.c free_extent_buffer_stale(eb);
eb 42 fs/btrfs/disk-io.h int btrfs_verify_level_key(struct extent_buffer *eb, int level,
eb 146 fs/btrfs/disk-io.h struct extent_buffer *eb, int level);
eb 151 fs/btrfs/disk-io.h struct extent_buffer *eb, int level)
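The disk-io.c read-completion hits (595-699 above) validate a block before set_extent_buffer_uptodate() is allowed: header bytenr against the read position, fsid, checksum, then the full leaf or node checks. Reduced to a sketch (csum_tree_block() and check_tree_block_fsid() are the file-local helpers shown above; kernel context assumed):

        if (btrfs_header_bytenr(eb) != eb->start)
                return -EIO;            /* block claims to live elsewhere */
        if (check_tree_block_fsid(eb))
                return -EIO;            /* block belongs to a different fs */
        if (csum_tree_block(eb, result) ||
            memcmp_extent_buffer(eb, result, 0, csum_size))
                return -EIO;            /* checksum mismatch */
        if (btrfs_header_level(eb) == 0)
                ret = btrfs_check_leaf_full(eb);
        else
                ret = btrfs_check_node(eb);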
eb 388 fs/btrfs/extent-tree.c int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
eb 392 fs/btrfs/extent-tree.c int type = btrfs_extent_inline_ref_type(eb, iref);
eb 393 fs/btrfs/extent-tree.c u64 offset = btrfs_extent_inline_ref_offset(eb, iref);
eb 403 fs/btrfs/extent-tree.c ASSERT(eb->fs_info);
eb 410 fs/btrfs/extent-tree.c IS_ALIGNED(offset, eb->fs_info->nodesize))
eb 417 fs/btrfs/extent-tree.c ASSERT(eb->fs_info);
eb 424 fs/btrfs/extent-tree.c IS_ALIGNED(offset, eb->fs_info->nodesize))
eb 433 fs/btrfs/extent-tree.c btrfs_print_leaf((struct extent_buffer *)eb);
eb 434 fs/btrfs/extent-tree.c btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
eb 435 fs/btrfs/extent-tree.c eb->start, type);
eb 2687 fs/btrfs/extent-tree.c int btrfs_exclude_logged_extents(struct extent_buffer *eb)
eb 2689 fs/btrfs/extent-tree.c struct btrfs_fs_info *fs_info = eb->fs_info;
eb 2699 fs/btrfs/extent-tree.c for (i = 0; i < btrfs_header_nritems(eb); i++) {
eb 2700 fs/btrfs/extent-tree.c btrfs_item_key_to_cpu(eb, &key, i);
eb 2703 fs/btrfs/extent-tree.c item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
eb 2704 fs/btrfs/extent-tree.c found_type = btrfs_file_extent_type(eb, item);
eb 2707 fs/btrfs/extent-tree.c if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
eb 2709 fs/btrfs/extent-tree.c key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
eb 2710 fs/btrfs/extent-tree.c key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
eb 4607 fs/btrfs/extent-tree.c struct extent_buffer *eb;
eb 4621 fs/btrfs/extent-tree.c eb = path->nodes[wc->level];
eb 4622 fs/btrfs/extent-tree.c nritems = btrfs_header_nritems(eb);
eb 4629 fs/btrfs/extent-tree.c bytenr = btrfs_node_blockptr(eb, slot);
eb 4630 fs/btrfs/extent-tree.c generation = btrfs_node_ptr_generation(eb, slot);
eb 4658 fs/btrfs/extent-tree.c btrfs_node_key_to_cpu(eb, &key, slot);
eb 4690 fs/btrfs/extent-tree.c struct extent_buffer *eb = path->nodes[level];
eb 4695 fs/btrfs/extent-tree.c btrfs_header_owner(eb) != root->root_key.objectid)
eb 4707 fs/btrfs/extent-tree.c eb->start, level, 1,
eb 4721 fs/btrfs/extent-tree.c btrfs_tree_unlock_rw(eb, path->locks[level]);
eb 4730 fs/btrfs/extent-tree.c ret = btrfs_inc_ref(trans, root, eb, 1);
eb 4732 fs/btrfs/extent-tree.c ret = btrfs_dec_ref(trans, root, eb, 0);
eb 4734 fs/btrfs/extent-tree.c ret = btrfs_set_disk_extent_flags(trans, eb->start,
eb 4735 fs/btrfs/extent-tree.c eb->len, flag,
eb 4736 fs/btrfs/extent-tree.c btrfs_header_level(eb), 0);
eb 4746 fs/btrfs/extent-tree.c btrfs_tree_unlock_rw(eb, path->locks[level]);
eb 5014 fs/btrfs/extent-tree.c struct extent_buffer *eb = path->nodes[level];
eb 5037 fs/btrfs/extent-tree.c btrfs_tree_lock(eb);
eb 5038 fs/btrfs/extent-tree.c btrfs_set_lock_blocking_write(eb);
eb 5042 fs/btrfs/extent-tree.c eb->start, level, 1,
eb 5046 fs/btrfs/extent-tree.c btrfs_tree_unlock_rw(eb, path->locks[level]);
eb 5052 fs/btrfs/extent-tree.c btrfs_tree_unlock_rw(eb, path->locks[level]);
eb 5065 fs/btrfs/extent-tree.c ret = btrfs_dec_ref(trans, root, eb, 1);
eb 5067 fs/btrfs/extent-tree.c ret = btrfs_dec_ref(trans, root, eb, 0);
eb 5070 fs/btrfs/extent-tree.c ret = btrfs_qgroup_trace_leaf_items(trans, eb);
eb 5080 fs/btrfs/extent-tree.c btrfs_header_generation(eb) == trans->transid) {
eb 5081 fs/btrfs/extent-tree.c btrfs_tree_lock(eb);
eb 5082 fs/btrfs/extent-tree.c btrfs_set_lock_blocking_write(eb);
eb 5085 fs/btrfs/extent-tree.c btrfs_clean_tree_block(eb);
eb 5088 fs/btrfs/extent-tree.c if (eb == root->node) {
eb 5090 fs/btrfs/extent-tree.c parent = eb->start;
eb 5091 fs/btrfs/extent-tree.c else if (root->root_key.objectid != btrfs_header_owner(eb))
eb 5101 fs/btrfs/extent-tree.c btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
eb 5109 fs/btrfs/extent-tree.c btrfs_header_owner(eb), root->root_key.objectid);
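The extent-tree.c walk hits above (4607-5109) iterate an internal node slot by slot; each slot carries the child's logical block number, the transid it was written with, and its first key. One iteration step in sketch form (kernel context assumed):

        int slot;
        u64 bytenr, gen;
        struct btrfs_key key;

        for (slot = 0; slot < btrfs_header_nritems(eb); slot++) {
                bytenr = btrfs_node_blockptr(eb, slot);       /* child logical address */
                gen = btrfs_node_ptr_generation(eb, slot);    /* expected generation */
                btrfs_node_key_to_cpu(eb, &key, slot);        /* child's first key */
                /* ... decide whether to descend or skip ... */
        }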
attach_extent_buffer_page(struct extent_buffer *eb, eb 3006 fs/btrfs/extent_io.c set_page_private(page, (unsigned long)eb); eb 3008 fs/btrfs/extent_io.c WARN_ON(page->private != (unsigned long)eb); eb 3629 fs/btrfs/extent_io.c void wait_on_extent_buffer_writeback(struct extent_buffer *eb) eb 3631 fs/btrfs/extent_io.c wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb 3635 fs/btrfs/extent_io.c static void end_extent_buffer_writeback(struct extent_buffer *eb) eb 3637 fs/btrfs/extent_io.c clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); eb 3639 fs/btrfs/extent_io.c wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); eb 3649 fs/btrfs/extent_io.c static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb, eb 3652 fs/btrfs/extent_io.c struct btrfs_fs_info *fs_info = eb->fs_info; eb 3657 fs/btrfs/extent_io.c if (!btrfs_try_tree_write_lock(eb)) { eb 3662 fs/btrfs/extent_io.c btrfs_tree_lock(eb); eb 3665 fs/btrfs/extent_io.c if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { eb 3666 fs/btrfs/extent_io.c btrfs_tree_unlock(eb); eb 3676 fs/btrfs/extent_io.c wait_on_extent_buffer_writeback(eb); eb 3677 fs/btrfs/extent_io.c btrfs_tree_lock(eb); eb 3678 fs/btrfs/extent_io.c if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) eb 3680 fs/btrfs/extent_io.c btrfs_tree_unlock(eb); eb 3689 fs/btrfs/extent_io.c spin_lock(&eb->refs_lock); eb 3690 fs/btrfs/extent_io.c if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { eb 3691 fs/btrfs/extent_io.c set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); eb 3692 fs/btrfs/extent_io.c spin_unlock(&eb->refs_lock); eb 3693 fs/btrfs/extent_io.c btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); eb 3695 fs/btrfs/extent_io.c -eb->len, eb 3699 fs/btrfs/extent_io.c spin_unlock(&eb->refs_lock); eb 3702 fs/btrfs/extent_io.c btrfs_tree_unlock(eb); eb 3707 fs/btrfs/extent_io.c num_pages = num_extent_pages(eb); eb 3709 fs/btrfs/extent_io.c struct page *p = eb->pages[i]; eb 3731 fs/btrfs/extent_io.c unlock_page(eb->pages[i]); eb 3737 fs/btrfs/extent_io.c btrfs_tree_lock(eb); eb 3738 fs/btrfs/extent_io.c spin_lock(&eb->refs_lock); eb 3739 fs/btrfs/extent_io.c set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); eb 3740 fs/btrfs/extent_io.c end_extent_buffer_writeback(eb); eb 3741 fs/btrfs/extent_io.c spin_unlock(&eb->refs_lock); eb 3742 fs/btrfs/extent_io.c percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len, eb 3744 fs/btrfs/extent_io.c btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); eb 3745 fs/btrfs/extent_io.c btrfs_tree_unlock(eb); eb 3751 fs/btrfs/extent_io.c struct extent_buffer *eb = (struct extent_buffer *)page->private; eb 3755 fs/btrfs/extent_io.c if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) eb 3762 fs/btrfs/extent_io.c fs_info = eb->fs_info; eb 3764 fs/btrfs/extent_io.c eb->len, fs_info->dirty_metadata_batch); eb 3804 fs/btrfs/extent_io.c switch (eb->log_index) { eb 3806 fs/btrfs/extent_io.c set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags); eb 3809 fs/btrfs/extent_io.c set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags); eb 3812 fs/btrfs/extent_io.c set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags); eb 3822 fs/btrfs/extent_io.c struct extent_buffer *eb; eb 3830 fs/btrfs/extent_io.c eb = (struct extent_buffer *)page->private; eb 3831 fs/btrfs/extent_io.c BUG_ON(!eb); eb 3832 fs/btrfs/extent_io.c done = atomic_dec_and_test(&eb->io_pages); eb 3835 fs/btrfs/extent_io.c test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { eb 3845 fs/btrfs/extent_io.c end_extent_buffer_writeback(eb); eb 3851 fs/btrfs/extent_io.c static 
eb 3851 fs/btrfs/extent_io.c static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
eb 3855 fs/btrfs/extent_io.c struct btrfs_fs_info *fs_info = eb->fs_info;
eb 3858 fs/btrfs/extent_io.c u64 offset = eb->start;
eb 3865 fs/btrfs/extent_io.c clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
eb 3866 fs/btrfs/extent_io.c num_pages = num_extent_pages(eb);
eb 3867 fs/btrfs/extent_io.c atomic_set(&eb->io_pages, num_pages);
eb 3870 fs/btrfs/extent_io.c nritems = btrfs_header_nritems(eb);
eb 3871 fs/btrfs/extent_io.c if (btrfs_header_level(eb) > 0) {
eb 3874 fs/btrfs/extent_io.c memzero_extent_buffer(eb, end, eb->len - end);
eb 3881 fs/btrfs/extent_io.c end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb);
eb 3882 fs/btrfs/extent_io.c memzero_extent_buffer(eb, start, end - start);
eb 3886 fs/btrfs/extent_io.c struct page *p = eb->pages[i];
eb 3899 fs/btrfs/extent_io.c if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
eb 3900 fs/btrfs/extent_io.c end_extent_buffer_writeback(eb);
eb 3911 fs/btrfs/extent_io.c struct page *p = eb->pages[i];
eb 3924 fs/btrfs/extent_io.c struct extent_buffer *eb, *prev_eb = NULL;
eb 3980 fs/btrfs/extent_io.c eb = (struct extent_buffer *)page->private;
eb 3987 fs/btrfs/extent_io.c if (WARN_ON(!eb)) {
eb 3992 fs/btrfs/extent_io.c if (eb == prev_eb) {
eb 3997 fs/btrfs/extent_io.c ret = atomic_inc_not_zero(&eb->refs);
eb 4002 fs/btrfs/extent_io.c prev_eb = eb;
eb 4003 fs/btrfs/extent_io.c ret = lock_extent_buffer_for_io(eb, &epd);
eb 4005 fs/btrfs/extent_io.c free_extent_buffer(eb);
eb 4009 fs/btrfs/extent_io.c free_extent_buffer(eb);
eb 4013 fs/btrfs/extent_io.c ret = write_one_eb(eb, wbc, &epd);
eb 4016 fs/btrfs/extent_io.c free_extent_buffer(eb);
eb 4019 fs/btrfs/extent_io.c free_extent_buffer(eb);
eb 4848 fs/btrfs/extent_io.c static void __free_extent_buffer(struct extent_buffer *eb)
eb 4850 fs/btrfs/extent_io.c btrfs_leak_debug_del(&eb->leak_list);
eb 4851 fs/btrfs/extent_io.c kmem_cache_free(extent_buffer_cache, eb);
eb 4854 fs/btrfs/extent_io.c int extent_buffer_under_io(struct extent_buffer *eb)
eb 4856 fs/btrfs/extent_io.c return (atomic_read(&eb->io_pages) ||
eb 4857 fs/btrfs/extent_io.c test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
eb 4858 fs/btrfs/extent_io.c test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
eb 4864 fs/btrfs/extent_io.c static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
eb 4868 fs/btrfs/extent_io.c int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
eb 4870 fs/btrfs/extent_io.c BUG_ON(extent_buffer_under_io(eb));
eb 4872 fs/btrfs/extent_io.c num_pages = num_extent_pages(eb);
eb 4874 fs/btrfs/extent_io.c struct page *page = eb->pages[i];
eb 4888 fs/btrfs/extent_io.c page->private == (unsigned long)eb) {
eb 4889 fs/btrfs/extent_io.c BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
eb 4913 fs/btrfs/extent_io.c static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
eb 4915 fs/btrfs/extent_io.c btrfs_release_extent_buffer_pages(eb);
eb 4916 fs/btrfs/extent_io.c __free_extent_buffer(eb);
eb 4923 fs/btrfs/extent_io.c struct extent_buffer *eb = NULL;
eb 4925 fs/btrfs/extent_io.c eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
eb 4926 fs/btrfs/extent_io.c eb->start = start;
eb 4927 fs/btrfs/extent_io.c eb->len = len;
eb 4928 fs/btrfs/extent_io.c eb->fs_info = fs_info;
eb 4929 fs/btrfs/extent_io.c eb->bflags = 0;
eb 4930 fs/btrfs/extent_io.c rwlock_init(&eb->lock);
eb 4931 fs/btrfs/extent_io.c atomic_set(&eb->blocking_readers, 0);
eb 4932 fs/btrfs/extent_io.c eb->blocking_writers = 0;
eb 4933 fs/btrfs/extent_io.c eb->lock_nested = false;
eb 4934 fs/btrfs/extent_io.c init_waitqueue_head(&eb->write_lock_wq);
eb 4935 fs/btrfs/extent_io.c init_waitqueue_head(&eb->read_lock_wq);
eb 4937 fs/btrfs/extent_io.c btrfs_leak_debug_add(&eb->leak_list, &buffers);
eb 4939 fs/btrfs/extent_io.c spin_lock_init(&eb->refs_lock);
eb 4940 fs/btrfs/extent_io.c atomic_set(&eb->refs, 1);
eb 4941 fs/btrfs/extent_io.c atomic_set(&eb->io_pages, 0);
eb 4951 fs/btrfs/extent_io.c eb->spinning_writers = 0;
eb 4952 fs/btrfs/extent_io.c atomic_set(&eb->spinning_readers, 0);
eb 4953 fs/btrfs/extent_io.c atomic_set(&eb->read_locks, 0);
eb 4954 fs/btrfs/extent_io.c eb->write_locks = 0;
eb 4957 fs/btrfs/extent_io.c return eb;
eb 4993 fs/btrfs/extent_io.c struct extent_buffer *eb;
eb 4997 fs/btrfs/extent_io.c eb = __alloc_extent_buffer(fs_info, start, len);
eb 4998 fs/btrfs/extent_io.c if (!eb)
eb 5001 fs/btrfs/extent_io.c num_pages = num_extent_pages(eb);
eb 5003 fs/btrfs/extent_io.c eb->pages[i] = alloc_page(GFP_NOFS);
eb 5004 fs/btrfs/extent_io.c if (!eb->pages[i])
eb 5007 fs/btrfs/extent_io.c set_extent_buffer_uptodate(eb);
eb 5008 fs/btrfs/extent_io.c btrfs_set_header_nritems(eb, 0);
eb 5009 fs/btrfs/extent_io.c set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
eb 5011 fs/btrfs/extent_io.c return eb;
eb 5014 fs/btrfs/extent_io.c __free_page(eb->pages[i - 1]);
eb 5015 fs/btrfs/extent_io.c __free_extent_buffer(eb);
eb 5025 fs/btrfs/extent_io.c static void check_buffer_tree_ref(struct extent_buffer *eb)
eb 5048 fs/btrfs/extent_io.c refs = atomic_read(&eb->refs);
eb 5049 fs/btrfs/extent_io.c if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
eb 5052 fs/btrfs/extent_io.c spin_lock(&eb->refs_lock);
eb 5053 fs/btrfs/extent_io.c if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
eb 5054 fs/btrfs/extent_io.c atomic_inc(&eb->refs);
eb 5055 fs/btrfs/extent_io.c spin_unlock(&eb->refs_lock);
eb 5058 fs/btrfs/extent_io.c static void mark_extent_buffer_accessed(struct extent_buffer *eb,
eb 5063 fs/btrfs/extent_io.c check_buffer_tree_ref(eb);
eb 5065 fs/btrfs/extent_io.c num_pages = num_extent_pages(eb);
eb 5067 fs/btrfs/extent_io.c struct page *p = eb->pages[i];
eb 5077 fs/btrfs/extent_io.c struct extent_buffer *eb;
eb 5080 fs/btrfs/extent_io.c eb = radix_tree_lookup(&fs_info->buffer_radix,
eb 5082 fs/btrfs/extent_io.c if (eb && atomic_inc_not_zero(&eb->refs)) {
eb 5099 fs/btrfs/extent_io.c if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
eb 5100 fs/btrfs/extent_io.c spin_lock(&eb->refs_lock);
eb 5101 fs/btrfs/extent_io.c spin_unlock(&eb->refs_lock);
eb 5103 fs/btrfs/extent_io.c mark_extent_buffer_accessed(eb, NULL);
eb 5104 fs/btrfs/extent_io.c return eb;
eb 5115 fs/btrfs/extent_io.c struct extent_buffer *eb, *exists = NULL;
eb 5118 fs/btrfs/extent_io.c eb = find_extent_buffer(fs_info, start);
eb 5119 fs/btrfs/extent_io.c if (eb)
eb 5120 fs/btrfs/extent_io.c return eb;
eb 5121 fs/btrfs/extent_io.c eb = alloc_dummy_extent_buffer(fs_info, start);
eb 5122 fs/btrfs/extent_io.c if (!eb)
eb 5124 fs/btrfs/extent_io.c eb->fs_info = fs_info;
eb 5133 fs/btrfs/extent_io.c start >> PAGE_SHIFT, eb);
eb 5143 fs/btrfs/extent_io.c check_buffer_tree_ref(eb);
eb 5144 fs/btrfs/extent_io.c set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
eb 5146 fs/btrfs/extent_io.c return eb;
eb 5148 fs/btrfs/extent_io.c btrfs_release_extent_buffer(eb);
eb 5160 fs/btrfs/extent_io.c struct extent_buffer *eb;
eb 5172 fs/btrfs/extent_io.c eb = find_extent_buffer(fs_info, start);
eb 5173 fs/btrfs/extent_io.c if (eb)
eb 5174 fs/btrfs/extent_io.c return eb;
eb 5176 fs/btrfs/extent_io.c eb = __alloc_extent_buffer(fs_info, start, len);
eb 5177 fs/btrfs/extent_io.c if (!eb)
eb 5180 fs/btrfs/extent_io.c num_pages = num_extent_pages(eb);
eb 5215 fs/btrfs/extent_io.c attach_extent_buffer_page(eb, p);
eb 5218 fs/btrfs/extent_io.c eb->pages[i] = p;
eb 5231 fs/btrfs/extent_io.c set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
eb 5241 fs/btrfs/extent_io.c start >> PAGE_SHIFT, eb);
eb 5252 fs/btrfs/extent_io.c check_buffer_tree_ref(eb);
eb 5253 fs/btrfs/extent_io.c set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
eb 5261 fs/btrfs/extent_io.c unlock_page(eb->pages[i]);
eb 5262 fs/btrfs/extent_io.c return eb;
eb 5265 fs/btrfs/extent_io.c WARN_ON(!atomic_dec_and_test(&eb->refs));
eb 5267 fs/btrfs/extent_io.c if (eb->pages[i])
eb 5268 fs/btrfs/extent_io.c unlock_page(eb->pages[i]);
eb 5271 fs/btrfs/extent_io.c btrfs_release_extent_buffer(eb);
eb 5277 fs/btrfs/extent_io.c struct extent_buffer *eb =
eb 5280 fs/btrfs/extent_io.c __free_extent_buffer(eb);
eb 5283 fs/btrfs/extent_io.c static int release_extent_buffer(struct extent_buffer *eb)
eb 5285 fs/btrfs/extent_io.c lockdep_assert_held(&eb->refs_lock);
eb 5287 fs/btrfs/extent_io.c WARN_ON(atomic_read(&eb->refs) == 0);
eb 5288 fs/btrfs/extent_io.c if (atomic_dec_and_test(&eb->refs)) {
eb 5289 fs/btrfs/extent_io.c if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
eb 5290 fs/btrfs/extent_io.c struct btrfs_fs_info *fs_info = eb->fs_info;
eb 5292 fs/btrfs/extent_io.c spin_unlock(&eb->refs_lock);
eb 5296 fs/btrfs/extent_io.c eb->start >> PAGE_SHIFT);
eb 5299 fs/btrfs/extent_io.c spin_unlock(&eb->refs_lock);
eb 5303 fs/btrfs/extent_io.c btrfs_release_extent_buffer_pages(eb);
eb 5305 fs/btrfs/extent_io.c if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
eb 5306 fs/btrfs/extent_io.c __free_extent_buffer(eb);
eb 5310 fs/btrfs/extent_io.c call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
eb 5313 fs/btrfs/extent_io.c spin_unlock(&eb->refs_lock);
eb 5318 fs/btrfs/extent_io.c void free_extent_buffer(struct extent_buffer *eb)
eb 5322 fs/btrfs/extent_io.c if (!eb)
eb 5326 fs/btrfs/extent_io.c refs = atomic_read(&eb->refs);
eb 5327 fs/btrfs/extent_io.c if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
eb 5328 fs/btrfs/extent_io.c || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
eb 5331 fs/btrfs/extent_io.c old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
eb 5336 fs/btrfs/extent_io.c spin_lock(&eb->refs_lock);
eb 5337 fs/btrfs/extent_io.c if (atomic_read(&eb->refs) == 2 &&
eb 5338 fs/btrfs/extent_io.c test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
eb 5339 fs/btrfs/extent_io.c !extent_buffer_under_io(eb) &&
eb 5340 fs/btrfs/extent_io.c test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
eb 5341 fs/btrfs/extent_io.c atomic_dec(&eb->refs);
eb 5347 fs/btrfs/extent_io.c release_extent_buffer(eb);
eb 5350 fs/btrfs/extent_io.c void free_extent_buffer_stale(struct extent_buffer *eb)
eb 5352 fs/btrfs/extent_io.c if (!eb)
eb 5355 fs/btrfs/extent_io.c spin_lock(&eb->refs_lock);
eb 5356 fs/btrfs/extent_io.c set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
eb 5358 fs/btrfs/extent_io.c if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
eb 5359 fs/btrfs/extent_io.c test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
eb 5360 fs/btrfs/extent_io.c atomic_dec(&eb->refs);
eb 5361 fs/btrfs/extent_io.c release_extent_buffer(eb);
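find_extent_buffer() and free_extent_buffer() above form a strict get/put pair: the lookup only succeeds if it can take a reference via atomic_inc_not_zero(&eb->refs), and every caller visible in this listing drops that reference again. Minimal sketch (kernel context assumed):

        struct extent_buffer *eb;

        eb = find_extent_buffer(fs_info, start);   /* NULL if not cached */
        if (eb) {
                /* ... inspect the cached buffer ... */
                free_extent_buffer(eb);            /* drop the lookup reference */
        }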
eb 5364 fs/btrfs/extent_io.c void clear_extent_buffer_dirty(struct extent_buffer *eb)
eb 5370 fs/btrfs/extent_io.c num_pages = num_extent_pages(eb);
eb 5373 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5389 fs/btrfs/extent_io.c WARN_ON(atomic_read(&eb->refs) == 0);
eb 5392 fs/btrfs/extent_io.c bool set_extent_buffer_dirty(struct extent_buffer *eb)
eb 5398 fs/btrfs/extent_io.c check_buffer_tree_ref(eb);
eb 5400 fs/btrfs/extent_io.c was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
eb 5402 fs/btrfs/extent_io.c num_pages = num_extent_pages(eb);
eb 5403 fs/btrfs/extent_io.c WARN_ON(atomic_read(&eb->refs) == 0);
eb 5404 fs/btrfs/extent_io.c WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
eb 5408 fs/btrfs/extent_io.c set_page_dirty(eb->pages[i]);
eb 5412 fs/btrfs/extent_io.c ASSERT(PageDirty(eb->pages[i]));
eb 5418 fs/btrfs/extent_io.c void clear_extent_buffer_uptodate(struct extent_buffer *eb)
eb 5424 fs/btrfs/extent_io.c clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
eb 5425 fs/btrfs/extent_io.c num_pages = num_extent_pages(eb);
eb 5427 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5433 fs/btrfs/extent_io.c void set_extent_buffer_uptodate(struct extent_buffer *eb)
eb 5439 fs/btrfs/extent_io.c set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
eb 5440 fs/btrfs/extent_io.c num_pages = num_extent_pages(eb);
eb 5442 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5447 fs/btrfs/extent_io.c int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
eb 5459 fs/btrfs/extent_io.c struct extent_io_tree *tree = &BTRFS_I(eb->fs_info->btree_inode)->io_tree;
eb 5461 fs/btrfs/extent_io.c if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
eb 5464 fs/btrfs/extent_io.c num_pages = num_extent_pages(eb);
eb 5466 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5481 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5489 fs/btrfs/extent_io.c set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
eb 5493 fs/btrfs/extent_io.c clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb 5494 fs/btrfs/extent_io.c eb->read_mirror = 0;
eb 5495 fs/btrfs/extent_io.c atomic_set(&eb->io_pages, num_reads);
eb 5497 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5501 fs/btrfs/extent_io.c atomic_dec(&eb->io_pages);
eb 5521 fs/btrfs/extent_io.c atomic_dec(&eb->io_pages);
eb 5538 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5549 fs/btrfs/extent_io.c page = eb->pages[locked_pages];
eb 5555 fs/btrfs/extent_io.c void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
eb 5563 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start);
eb 5566 fs/btrfs/extent_io.c if (start + len > eb->len) {
eb 5568 fs/btrfs/extent_io.c eb->start, eb->len, start, len);
eb 5576 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5589 fs/btrfs/extent_io.c int read_extent_buffer_to_user(const struct extent_buffer *eb,
eb 5598 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start);
eb 5602 fs/btrfs/extent_io.c WARN_ON(start > eb->len);
eb 5603 fs/btrfs/extent_io.c WARN_ON(start + len > eb->start + eb->len);
eb 5608 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5631 fs/btrfs/extent_io.c int map_private_extent_buffer(const struct extent_buffer *eb,
eb 5639 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start);
eb 5644 fs/btrfs/extent_io.c if (start + min_len > eb->len) {
eb 5646 fs/btrfs/extent_io.c eb->start, eb->len, start, min_len);
eb 5661 fs/btrfs/extent_io.c p = eb->pages[i];
eb 5668 fs/btrfs/extent_io.c int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
eb 5676 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start);
eb 5680 fs/btrfs/extent_io.c WARN_ON(start > eb->len);
eb 5681 fs/btrfs/extent_io.c WARN_ON(start + len > eb->start + eb->len);
eb 5686 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5703 fs/btrfs/extent_io.c void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
eb 5708 fs/btrfs/extent_io.c WARN_ON(!PageUptodate(eb->pages[0]));
eb 5709 fs/btrfs/extent_io.c kaddr = page_address(eb->pages[0]);
eb 5714 fs/btrfs/extent_io.c void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
eb 5718 fs/btrfs/extent_io.c WARN_ON(!PageUptodate(eb->pages[0]));
eb 5719 fs/btrfs/extent_io.c kaddr = page_address(eb->pages[0]);
eb 5724 fs/btrfs/extent_io.c void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
eb 5732 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start);
eb 5735 fs/btrfs/extent_io.c WARN_ON(start > eb->len);
eb 5736 fs/btrfs/extent_io.c WARN_ON(start + len > eb->start + eb->len);
eb 5741 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5755 fs/btrfs/extent_io.c void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
eb 5762 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start);
eb 5765 fs/btrfs/extent_io.c WARN_ON(start > eb->len);
eb 5766 fs/btrfs/extent_io.c WARN_ON(start + len > eb->start + eb->len);
eb 5771 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5843 fs/btrfs/extent_io.c static inline void eb_bitmap_offset(struct extent_buffer *eb,
eb 5848 fs/btrfs/extent_io.c size_t start_offset = offset_in_page(eb->start);
eb 5869 fs/btrfs/extent_io.c int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
eb 5877 fs/btrfs/extent_io.c eb_bitmap_offset(eb, start, nr, &i, &offset);
eb 5878 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5891 fs/btrfs/extent_io.c void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
eb 5902 fs/btrfs/extent_io.c eb_bitmap_offset(eb, start, pos, &i, &offset);
eb 5903 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5914 fs/btrfs/extent_io.c page = eb->pages[++i];
eb 5933 fs/btrfs/extent_io.c void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
eb 5944 fs/btrfs/extent_io.c eb_bitmap_offset(eb, start, pos, &i, &offset);
eb 5945 fs/btrfs/extent_io.c page = eb->pages[i];
eb 5956 fs/btrfs/extent_io.c page = eb->pages[++i];
eb 6090 fs/btrfs/extent_io.c struct extent_buffer *eb;
eb 6102 fs/btrfs/extent_io.c eb = (struct extent_buffer *)page->private;
eb 6103 fs/btrfs/extent_io.c BUG_ON(!eb);
eb 6110 fs/btrfs/extent_io.c spin_lock(&eb->refs_lock);
eb 6111 fs/btrfs/extent_io.c if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
eb 6112 fs/btrfs/extent_io.c spin_unlock(&eb->refs_lock);
eb 6122 fs/btrfs/extent_io.c if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
eb 6123 fs/btrfs/extent_io.c spin_unlock(&eb->refs_lock);
eb 6127 fs/btrfs/extent_io.c return release_extent_buffer(eb);
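The copy helpers above (read_extent_buffer(), write_extent_buffer(), memzero_extent_buffer(), memcmp_extent_buffer()) address extent-buffer content by byte offset relative to the start of the buffer and walk eb->pages[] internally, so callers never map pages themselves. Sketch of typical use; the fsid read is the exact call from disk-io.c above, the other offsets are illustrative:

        u8 fsid[BTRFS_FSID_SIZE];

        read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
        write_extent_buffer(eb, src, offset, len);   /* copy len bytes into the buffer */
        memzero_extent_buffer(eb, offset, len);      /* zero a range, e.g. unused space */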
eb 430 fs/btrfs/extent_io.h void free_extent_buffer(struct extent_buffer *eb);
eb 431 fs/btrfs/extent_io.h void free_extent_buffer_stale(struct extent_buffer *eb);
eb 435 fs/btrfs/extent_io.h int read_extent_buffer_pages(struct extent_buffer *eb, int wait,
eb 437 fs/btrfs/extent_io.h void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
eb 439 fs/btrfs/extent_io.h static inline int num_extent_pages(const struct extent_buffer *eb)
eb 441 fs/btrfs/extent_io.h return (round_up(eb->start + eb->len, PAGE_SIZE) >> PAGE_SHIFT) -
eb 442 fs/btrfs/extent_io.h (eb->start >> PAGE_SHIFT);
eb 445 fs/btrfs/extent_io.h static inline void extent_buffer_get(struct extent_buffer *eb)
eb 447 fs/btrfs/extent_io.h atomic_inc(&eb->refs);
eb 450 fs/btrfs/extent_io.h static inline int extent_buffer_uptodate(struct extent_buffer *eb)
eb 452 fs/btrfs/extent_io.h return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
eb 455 fs/btrfs/extent_io.h int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
eb 457 fs/btrfs/extent_io.h void read_extent_buffer(const struct extent_buffer *eb, void *dst,
eb 460 fs/btrfs/extent_io.h int read_extent_buffer_to_user(const struct extent_buffer *eb,
eb 463 fs/btrfs/extent_io.h void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
eb 464 fs/btrfs/extent_io.h void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
eb 466 fs/btrfs/extent_io.h void write_extent_buffer(struct extent_buffer *eb, const void *src,
eb 477 fs/btrfs/extent_io.h void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
eb 479 fs/btrfs/extent_io.h int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
eb 481 fs/btrfs/extent_io.h void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
eb 483 fs/btrfs/extent_io.h void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
eb 485 fs/btrfs/extent_io.h void clear_extent_buffer_dirty(struct extent_buffer *eb);
eb 486 fs/btrfs/extent_io.h bool set_extent_buffer_dirty(struct extent_buffer *eb);
eb 487 fs/btrfs/extent_io.h void set_extent_buffer_uptodate(struct extent_buffer *eb);
eb 488 fs/btrfs/extent_io.h void clear_extent_buffer_uptodate(struct extent_buffer *eb);
eb 489 fs/btrfs/extent_io.h int extent_buffer_under_io(struct extent_buffer *eb);
eb 490 fs/btrfs/extent_io.h int map_private_extent_buffer(const struct extent_buffer *eb,
eb 516 fs/btrfs/extent_io.h int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);
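num_extent_pages() above is worth a worked example: with 4 KiB pages, a 16 KiB node starting page-aligned spans exactly four pages, and the round_up() only adds a page when eb->start or eb->len is not page-aligned. As a comment-style check (values illustrative):

        /* start = 0x4000000, len = 16384, PAGE_SIZE = 4096:                     */
        /* (round_up(0x4000000 + 16384, 4096) >> 12) - (0x4000000 >> 12) == 4    */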
eb 17 fs/btrfs/locking.c static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
eb 19 fs/btrfs/locking.c WARN_ON(eb->spinning_writers);
eb 20 fs/btrfs/locking.c eb->spinning_writers++;
eb 23 fs/btrfs/locking.c static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
eb 25 fs/btrfs/locking.c WARN_ON(eb->spinning_writers != 1);
eb 26 fs/btrfs/locking.c eb->spinning_writers--;
eb 29 fs/btrfs/locking.c static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
eb 31 fs/btrfs/locking.c WARN_ON(eb->spinning_writers);
eb 34 fs/btrfs/locking.c static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
eb 36 fs/btrfs/locking.c atomic_inc(&eb->spinning_readers);
eb 39 fs/btrfs/locking.c static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
eb 41 fs/btrfs/locking.c WARN_ON(atomic_read(&eb->spinning_readers) == 0);
eb 42 fs/btrfs/locking.c atomic_dec(&eb->spinning_readers);
eb 45 fs/btrfs/locking.c static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
eb 47 fs/btrfs/locking.c atomic_inc(&eb->read_locks);
eb 50 fs/btrfs/locking.c static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
eb 52 fs/btrfs/locking.c atomic_dec(&eb->read_locks);
eb 55 fs/btrfs/locking.c static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
eb 57 fs/btrfs/locking.c BUG_ON(!atomic_read(&eb->read_locks));
eb 60 fs/btrfs/locking.c static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
eb 62 fs/btrfs/locking.c eb->write_locks++;
eb 65 fs/btrfs/locking.c static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
eb 67 fs/btrfs/locking.c eb->write_locks--;
eb 70 fs/btrfs/locking.c void btrfs_assert_tree_locked(struct extent_buffer *eb)
eb 72 fs/btrfs/locking.c BUG_ON(!eb->write_locks);
eb 76 fs/btrfs/locking.c static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
eb 77 fs/btrfs/locking.c static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
eb 78 fs/btrfs/locking.c static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
eb 79 fs/btrfs/locking.c static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
eb 80 fs/btrfs/locking.c static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
eb 81 fs/btrfs/locking.c static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
eb 82 fs/btrfs/locking.c static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
eb 83 fs/btrfs/locking.c static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
eb 84 fs/btrfs/locking.c void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
eb 85 fs/btrfs/locking.c static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
eb 86 fs/btrfs/locking.c static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
eb 89 fs/btrfs/locking.c void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
eb 91 fs/btrfs/locking.c trace_btrfs_set_lock_blocking_read(eb);
eb 97 fs/btrfs/locking.c if (eb->lock_nested && current->pid == eb->lock_owner)
eb 99 fs/btrfs/locking.c btrfs_assert_tree_read_locked(eb);
eb 100 fs/btrfs/locking.c atomic_inc(&eb->blocking_readers);
eb 101 fs/btrfs/locking.c btrfs_assert_spinning_readers_put(eb);
eb 102 fs/btrfs/locking.c read_unlock(&eb->lock);
eb 105 fs/btrfs/locking.c void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
eb 107 fs/btrfs/locking.c trace_btrfs_set_lock_blocking_write(eb);
eb 113 fs/btrfs/locking.c if (eb->lock_nested && current->pid == eb->lock_owner)
eb 115 fs/btrfs/locking.c if (eb->blocking_writers == 0) {
eb 116 fs/btrfs/locking.c btrfs_assert_spinning_writers_put(eb);
eb 117 fs/btrfs/locking.c btrfs_assert_tree_locked(eb);
eb 118 fs/btrfs/locking.c eb->blocking_writers++;
eb 119 fs/btrfs/locking.c write_unlock(&eb->lock);
eb 127 fs/btrfs/locking.c void btrfs_tree_read_lock(struct extent_buffer *eb)
eb 134 fs/btrfs/locking.c read_lock(&eb->lock);
eb 135 fs/btrfs/locking.c BUG_ON(eb->blocking_writers == 0 &&
eb 136 fs/btrfs/locking.c current->pid == eb->lock_owner);
eb 137 fs/btrfs/locking.c if (eb->blocking_writers && current->pid == eb->lock_owner) {
eb 144 fs/btrfs/locking.c BUG_ON(eb->lock_nested);
eb 145 fs/btrfs/locking.c eb->lock_nested = true;
eb 146 fs/btrfs/locking.c read_unlock(&eb->lock);
eb 147 fs/btrfs/locking.c trace_btrfs_tree_read_lock(eb, start_ns);
eb 150 fs/btrfs/locking.c if (eb->blocking_writers) {
eb 151 fs/btrfs/locking.c read_unlock(&eb->lock);
eb 152 fs/btrfs/locking.c wait_event(eb->write_lock_wq,
eb 153 fs/btrfs/locking.c eb->blocking_writers == 0);
eb 156 fs/btrfs/locking.c btrfs_assert_tree_read_locks_get(eb);
eb 157 fs/btrfs/locking.c btrfs_assert_spinning_readers_get(eb);
eb 158 fs/btrfs/locking.c trace_btrfs_tree_read_lock(eb, start_ns);
eb 166 fs/btrfs/locking.c int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
eb 168 fs/btrfs/locking.c if (eb->blocking_writers)
eb 171 fs/btrfs/locking.c read_lock(&eb->lock);
eb 172 fs/btrfs/locking.c if (eb->blocking_writers) {
eb 173 fs/btrfs/locking.c read_unlock(&eb->lock);
eb 176 fs/btrfs/locking.c btrfs_assert_tree_read_locks_get(eb);
eb 177 fs/btrfs/locking.c btrfs_assert_spinning_readers_get(eb);
eb 178 fs/btrfs/locking.c trace_btrfs_tree_read_lock_atomic(eb);
eb 186 fs/btrfs/locking.c int btrfs_try_tree_read_lock(struct extent_buffer *eb)
eb 188 fs/btrfs/locking.c if (eb->blocking_writers)
eb 191 fs/btrfs/locking.c if (!read_trylock(&eb->lock))
eb 194 fs/btrfs/locking.c if (eb->blocking_writers) {
eb 195 fs/btrfs/locking.c read_unlock(&eb->lock);
eb 198 fs/btrfs/locking.c btrfs_assert_tree_read_locks_get(eb);
eb 199 fs/btrfs/locking.c btrfs_assert_spinning_readers_get(eb);
eb 200 fs/btrfs/locking.c trace_btrfs_try_tree_read_lock(eb);
eb 208 fs/btrfs/locking.c int btrfs_try_tree_write_lock(struct extent_buffer *eb)
eb 210 fs/btrfs/locking.c if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
eb 213 fs/btrfs/locking.c write_lock(&eb->lock);
eb 214 fs/btrfs/locking.c if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
eb 215 fs/btrfs/locking.c write_unlock(&eb->lock);
eb 218 fs/btrfs/locking.c btrfs_assert_tree_write_locks_get(eb);
eb 219 fs/btrfs/locking.c btrfs_assert_spinning_writers_get(eb);
eb 220 fs/btrfs/locking.c eb->lock_owner = current->pid;
eb 221 fs/btrfs/locking.c trace_btrfs_try_tree_write_lock(eb);
eb 228 fs/btrfs/locking.c void btrfs_tree_read_unlock(struct extent_buffer *eb)
eb 230 fs/btrfs/locking.c trace_btrfs_tree_read_unlock(eb);
eb 237 fs/btrfs/locking.c if (eb->lock_nested && current->pid == eb->lock_owner) {
eb 238 fs/btrfs/locking.c eb->lock_nested = false;
eb 241 fs/btrfs/locking.c btrfs_assert_tree_read_locked(eb);
eb 242 fs/btrfs/locking.c btrfs_assert_spinning_readers_put(eb);
eb 243 fs/btrfs/locking.c btrfs_assert_tree_read_locks_put(eb);
eb 244 fs/btrfs/locking.c read_unlock(&eb->lock);
eb 250 fs/btrfs/locking.c void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
eb 252 fs/btrfs/locking.c trace_btrfs_tree_read_unlock_blocking(eb);
eb 259 fs/btrfs/locking.c if (eb->lock_nested && current->pid == eb->lock_owner) {
eb 260 fs/btrfs/locking.c eb->lock_nested = false;
eb 263 fs/btrfs/locking.c btrfs_assert_tree_read_locked(eb);
eb 264 fs/btrfs/locking.c WARN_ON(atomic_read(&eb->blocking_readers) == 0);
eb 266 fs/btrfs/locking.c if (atomic_dec_and_test(&eb->blocking_readers))
eb 267 fs/btrfs/locking.c cond_wake_up_nomb(&eb->read_lock_wq);
eb 268 fs/btrfs/locking.c btrfs_assert_tree_read_locks_put(eb);
eb 275 fs/btrfs/locking.c void btrfs_tree_lock(struct extent_buffer *eb)
eb 282 fs/btrfs/locking.c WARN_ON(eb->lock_owner == current->pid);
eb 284 fs/btrfs/locking.c wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
eb 285 fs/btrfs/locking.c wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
eb 286 fs/btrfs/locking.c write_lock(&eb->lock);
eb 287 fs/btrfs/locking.c if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
eb 288 fs/btrfs/locking.c write_unlock(&eb->lock);
eb 291 fs/btrfs/locking.c btrfs_assert_spinning_writers_get(eb);
eb 292 fs/btrfs/locking.c btrfs_assert_tree_write_locks_get(eb);
eb 293 fs/btrfs/locking.c eb->lock_owner = current->pid;
eb 294 fs/btrfs/locking.c trace_btrfs_tree_lock(eb, start_ns);
eb 300 fs/btrfs/locking.c void btrfs_tree_unlock(struct extent_buffer *eb)
eb 302 fs/btrfs/locking.c int blockers = eb->blocking_writers;
eb 306 fs/btrfs/locking.c btrfs_assert_tree_locked(eb);
eb 307 fs/btrfs/locking.c trace_btrfs_tree_unlock(eb);
eb 308 fs/btrfs/locking.c eb->lock_owner = 0;
eb 309 fs/btrfs/locking.c btrfs_assert_tree_write_locks_put(eb);
eb 312 fs/btrfs/locking.c btrfs_assert_no_spinning_writers(eb);
eb 313 fs/btrfs/locking.c eb->blocking_writers--;
eb 319 fs/btrfs/locking.c cond_wake_up(&eb->write_lock_wq);
eb 321 fs/btrfs/locking.c btrfs_assert_spinning_writers_put(eb);
eb 322 fs/btrfs/locking.c write_unlock(&eb->lock);
eb 14 fs/btrfs/locking.h void btrfs_tree_lock(struct extent_buffer *eb);
eb 15 fs/btrfs/locking.h void btrfs_tree_unlock(struct extent_buffer *eb);
eb 17 fs/btrfs/locking.h void btrfs_tree_read_lock(struct extent_buffer *eb);
eb 18 fs/btrfs/locking.h void btrfs_tree_read_unlock(struct extent_buffer *eb);
eb 19 fs/btrfs/locking.h void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
eb 20 fs/btrfs/locking.h void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
eb 21 fs/btrfs/locking.h void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
eb 22 fs/btrfs/locking.h void btrfs_assert_tree_locked(struct extent_buffer *eb);
eb 23 fs/btrfs/locking.h int btrfs_try_tree_read_lock(struct extent_buffer *eb);
eb 24 fs/btrfs/locking.h int btrfs_try_tree_write_lock(struct extent_buffer *eb);
eb 25 fs/btrfs/locking.h int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
eb 28 fs/btrfs/locking.h static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
eb 31 fs/btrfs/locking.h btrfs_tree_unlock(eb);
eb 33 fs/btrfs/locking.h btrfs_tree_read_unlock_blocking(eb);
eb 35 fs/btrfs/locking.h btrfs_tree_read_unlock(eb);
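locking.h above is the complete tree-lock API. The usual write-side sequence, visible in the extent-tree.c hits earlier in this listing, takes the spinning lock first and switches it to blocking before anything that may sleep (sketch; kernel context assumed):

        btrfs_tree_lock(eb);                 /* spinning write lock */
        btrfs_set_lock_blocking_write(eb);   /* downgrade before sleeping operations */
        /* ... modify the extent buffer ... */
        btrfs_tree_unlock(eb);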
fs/btrfs/print-tree.c offset, btrfs_shared_data_ref_count(eb, sref)); eb 114 fs/btrfs/print-tree.c if (!IS_ALIGNED(offset, eb->fs_info->nodesize)) eb 116 fs/btrfs/print-tree.c offset, (unsigned long long)eb->fs_info->nodesize); eb 120 fs/btrfs/print-tree.c eb->start, type); eb 151 fs/btrfs/print-tree.c static void print_eb_refs_lock(struct extent_buffer *eb) eb 154 fs/btrfs/print-tree.c btrfs_info(eb->fs_info, eb 156 fs/btrfs/print-tree.c atomic_read(&eb->refs), eb->write_locks, eb 157 fs/btrfs/print-tree.c atomic_read(&eb->read_locks), eb 158 fs/btrfs/print-tree.c eb->blocking_writers, eb 159 fs/btrfs/print-tree.c atomic_read(&eb->blocking_readers), eb 160 fs/btrfs/print-tree.c eb->spinning_writers, eb 161 fs/btrfs/print-tree.c atomic_read(&eb->spinning_readers), eb 162 fs/btrfs/print-tree.c eb->lock_owner, current->pid); eb 1637 fs/btrfs/qgroup.c struct extent_buffer *eb) eb 1640 fs/btrfs/qgroup.c int nr = btrfs_header_nritems(eb); eb 1651 fs/btrfs/qgroup.c btrfs_item_key_to_cpu(eb, &key, i); eb 1656 fs/btrfs/qgroup.c fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); eb 1658 fs/btrfs/qgroup.c extent_type = btrfs_file_extent_type(eb, fi); eb 1663 fs/btrfs/qgroup.c bytenr = btrfs_file_extent_disk_bytenr(eb, fi); eb 1667 fs/btrfs/qgroup.c num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); eb 1696 fs/btrfs/qgroup.c struct extent_buffer *eb; eb 1702 fs/btrfs/qgroup.c eb = path->nodes[level]; eb 1703 fs/btrfs/qgroup.c nr = btrfs_header_nritems(eb); eb 1713 fs/btrfs/qgroup.c btrfs_tree_unlock_rw(eb, path->locks[level]); eb 1716 fs/btrfs/qgroup.c free_extent_buffer(eb); eb 1732 fs/btrfs/qgroup.c eb = path->nodes[root_level]; eb 1733 fs/btrfs/qgroup.c if (path->slots[root_level] >= btrfs_header_nritems(eb)) eb 1827 fs/btrfs/qgroup.c struct extent_buffer *eb; eb 1832 fs/btrfs/qgroup.c eb = src_path->nodes[cur_level + 1]; eb 1834 fs/btrfs/qgroup.c child_bytenr = btrfs_node_blockptr(eb, parent_slot); eb 1835 fs/btrfs/qgroup.c child_gen = btrfs_node_ptr_generation(eb, parent_slot); eb 1836 fs/btrfs/qgroup.c btrfs_node_key_to_cpu(eb, &first_key, parent_slot); eb 1838 fs/btrfs/qgroup.c eb = read_tree_block(fs_info, child_bytenr, child_gen, eb 1840 fs/btrfs/qgroup.c if (IS_ERR(eb)) { eb 1841 fs/btrfs/qgroup.c ret = PTR_ERR(eb); eb 1843 fs/btrfs/qgroup.c } else if (!extent_buffer_uptodate(eb)) { eb 1844 fs/btrfs/qgroup.c free_extent_buffer(eb); eb 1849 fs/btrfs/qgroup.c src_path->nodes[cur_level] = eb; eb 1851 fs/btrfs/qgroup.c btrfs_tree_read_lock(eb); eb 1852 fs/btrfs/qgroup.c btrfs_set_lock_blocking_read(eb); eb 1931 fs/btrfs/qgroup.c struct extent_buffer *eb; eb 1968 fs/btrfs/qgroup.c eb = dst_path->nodes[cur_level + 1]; eb 1970 fs/btrfs/qgroup.c child_bytenr = btrfs_node_blockptr(eb, parent_slot); eb 1971 fs/btrfs/qgroup.c child_gen = btrfs_node_ptr_generation(eb, parent_slot); eb 1972 fs/btrfs/qgroup.c btrfs_node_key_to_cpu(eb, &first_key, parent_slot); eb 1978 fs/btrfs/qgroup.c eb = read_tree_block(fs_info, child_bytenr, child_gen, eb 1980 fs/btrfs/qgroup.c if (IS_ERR(eb)) { eb 1981 fs/btrfs/qgroup.c ret = PTR_ERR(eb); eb 1983 fs/btrfs/qgroup.c } else if (!extent_buffer_uptodate(eb)) { eb 1984 fs/btrfs/qgroup.c free_extent_buffer(eb); eb 1989 fs/btrfs/qgroup.c dst_path->nodes[cur_level] = eb; eb 1992 fs/btrfs/qgroup.c btrfs_tree_read_lock(eb); eb 1993 fs/btrfs/qgroup.c btrfs_set_lock_blocking_read(eb); eb 2004 fs/btrfs/qgroup.c eb = dst_path->nodes[cur_level]; eb 2008 fs/btrfs/qgroup.c for (i = 0; i < btrfs_header_nritems(eb); i++) { eb 2010 fs/btrfs/qgroup.c if 
(btrfs_node_ptr_generation(eb, i) < last_snapshot) eb 2097 fs/btrfs/qgroup.c struct extent_buffer *eb = root_eb; eb 2147 fs/btrfs/qgroup.c eb = path->nodes[level + 1]; eb 2149 fs/btrfs/qgroup.c child_bytenr = btrfs_node_blockptr(eb, parent_slot); eb 2150 fs/btrfs/qgroup.c child_gen = btrfs_node_ptr_generation(eb, parent_slot); eb 2151 fs/btrfs/qgroup.c btrfs_node_key_to_cpu(eb, &first_key, parent_slot); eb 2153 fs/btrfs/qgroup.c eb = read_tree_block(fs_info, child_bytenr, child_gen, eb 2155 fs/btrfs/qgroup.c if (IS_ERR(eb)) { eb 2156 fs/btrfs/qgroup.c ret = PTR_ERR(eb); eb 2158 fs/btrfs/qgroup.c } else if (!extent_buffer_uptodate(eb)) { eb 2159 fs/btrfs/qgroup.c free_extent_buffer(eb); eb 2164 fs/btrfs/qgroup.c path->nodes[level] = eb; eb 2167 fs/btrfs/qgroup.c btrfs_tree_read_lock(eb); eb 2168 fs/btrfs/qgroup.c btrfs_set_lock_blocking_read(eb); eb 316 fs/btrfs/qgroup.h struct extent_buffer *eb); eb 416 fs/btrfs/qgroup.h struct btrfs_root *root, struct extent_buffer *eb); eb 95 fs/btrfs/reada.c struct reada_extent *re, struct extent_buffer *eb, eb 128 fs/btrfs/reada.c if (!btrfs_header_level(eb)) eb 131 fs/btrfs/reada.c nritems = btrfs_header_nritems(eb); eb 132 fs/btrfs/reada.c generation = btrfs_header_generation(eb); eb 139 fs/btrfs/reada.c btrfs_node_key_to_cpu(eb, &key, i); eb 141 fs/btrfs/reada.c btrfs_node_key_to_cpu(eb, &next_key, i + 1); eb 144 fs/btrfs/reada.c bytenr = btrfs_node_blockptr(eb, i); eb 145 fs/btrfs/reada.c n_gen = btrfs_node_ptr_generation(eb, i); eb 198 fs/btrfs/reada.c int btree_readahead_hook(struct extent_buffer *eb, int err) eb 200 fs/btrfs/reada.c struct btrfs_fs_info *fs_info = eb->fs_info; eb 207 fs/btrfs/reada.c eb->start >> PAGE_SHIFT); eb 216 fs/btrfs/reada.c __readahead_hook(fs_info, re, eb, err); eb 643 fs/btrfs/reada.c int mirror_num, struct extent_buffer **eb) eb 664 fs/btrfs/reada.c *eb = buf; eb 676 fs/btrfs/reada.c struct extent_buffer *eb = NULL; eb 736 fs/btrfs/reada.c ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb); eb 739 fs/btrfs/reada.c else if (eb) eb 740 fs/btrfs/reada.c __readahead_hook(fs_info, re, eb, ret); eb 742 fs/btrfs/reada.c if (eb) eb 743 fs/btrfs/reada.c free_extent_buffer(eb); eb 553 fs/btrfs/ref-verify.c struct extent_buffer *eb; eb 567 fs/btrfs/ref-verify.c eb = read_tree_block(fs_info, block_bytenr, gen, eb 569 fs/btrfs/ref-verify.c if (IS_ERR(eb)) eb 570 fs/btrfs/ref-verify.c return PTR_ERR(eb); eb 571 fs/btrfs/ref-verify.c if (!extent_buffer_uptodate(eb)) { eb 572 fs/btrfs/ref-verify.c free_extent_buffer(eb); eb 575 fs/btrfs/ref-verify.c btrfs_tree_read_lock(eb); eb 576 fs/btrfs/ref-verify.c btrfs_set_lock_blocking_read(eb); eb 577 fs/btrfs/ref-verify.c path->nodes[level-1] = eb; eb 980 fs/btrfs/ref-verify.c struct extent_buffer *eb; eb 991 fs/btrfs/ref-verify.c eb = btrfs_read_lock_root_node(fs_info->extent_root); eb 992 fs/btrfs/ref-verify.c btrfs_set_lock_blocking_read(eb); eb 993 fs/btrfs/ref-verify.c level = btrfs_header_level(eb); eb 994 fs/btrfs/ref-verify.c path->nodes[level] = eb; eb 53 fs/btrfs/relocation.c struct extent_buffer *eb; eb 388 fs/btrfs/relocation.c btrfs_tree_unlock(node->eb); eb 395 fs/btrfs/relocation.c if (node->eb) { eb 397 fs/btrfs/relocation.c free_extent_buffer(node->eb); eb 398 fs/btrfs/relocation.c node->eb = NULL; eb 689 fs/btrfs/relocation.c struct extent_buffer *eb; eb 770 fs/btrfs/relocation.c eb = path1->nodes[0]; eb 773 fs/btrfs/relocation.c if (path1->slots[0] >= btrfs_header_nritems(eb)) { eb 781 fs/btrfs/relocation.c eb = path1->nodes[0]; eb 784 
fs/btrfs/relocation.c btrfs_item_key_to_cpu(eb, &key, path1->slots[0]); eb 792 fs/btrfs/relocation.c ret = find_inline_backref(eb, path1->slots[0], eb 804 fs/btrfs/relocation.c type = btrfs_get_extent_inline_ref_type(eb, iref, eb 811 fs/btrfs/relocation.c key.offset = btrfs_extent_inline_ref_offset(eb, iref); eb 924 fs/btrfs/relocation.c eb = path2->nodes[level]; eb 925 fs/btrfs/relocation.c if (btrfs_node_blockptr(eb, path2->slots[level]) != eb 957 fs/btrfs/relocation.c eb = path2->nodes[level]; eb 958 fs/btrfs/relocation.c rb_node = tree_search(&cache->rb_root, eb->start); eb 966 fs/btrfs/relocation.c upper->bytenr = eb->start; eb 967 fs/btrfs/relocation.c upper->owner = btrfs_header_owner(eb); eb 977 fs/btrfs/relocation.c if (btrfs_block_can_be_shared(root, eb)) eb 1003 fs/btrfs/relocation.c upper->owner = btrfs_header_owner(eb); eb 1389 fs/btrfs/relocation.c struct extent_buffer *eb; eb 1405 fs/btrfs/relocation.c ret = btrfs_copy_root(trans, root, root->commit_root, &eb, eb 1426 fs/btrfs/relocation.c ret = btrfs_copy_root(trans, root, root->node, &eb, eb 1432 fs/btrfs/relocation.c btrfs_set_root_bytenr(root_item, eb->start); eb 1433 fs/btrfs/relocation.c btrfs_set_root_level(root_item, btrfs_header_level(eb)); eb 1443 fs/btrfs/relocation.c btrfs_tree_unlock(eb); eb 1444 fs/btrfs/relocation.c free_extent_buffer(eb); eb 1785 fs/btrfs/relocation.c int memcmp_node_keys(struct extent_buffer *eb, int slot, eb 1790 fs/btrfs/relocation.c btrfs_node_key(eb, &key1, slot); eb 1811 fs/btrfs/relocation.c struct extent_buffer *eb; eb 1834 fs/btrfs/relocation.c eb = btrfs_lock_root_node(dest); eb 1835 fs/btrfs/relocation.c btrfs_set_lock_blocking_write(eb); eb 1836 fs/btrfs/relocation.c level = btrfs_header_level(eb); eb 1839 fs/btrfs/relocation.c btrfs_tree_unlock(eb); eb 1840 fs/btrfs/relocation.c free_extent_buffer(eb); eb 1845 fs/btrfs/relocation.c ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb); eb 1848 fs/btrfs/relocation.c btrfs_set_lock_blocking_write(eb); eb 1856 fs/btrfs/relocation.c parent = eb; eb 1878 fs/btrfs/relocation.c eb = path->nodes[level]; eb 1879 fs/btrfs/relocation.c new_bytenr = btrfs_node_blockptr(eb, eb 1881 fs/btrfs/relocation.c new_ptr_gen = btrfs_node_ptr_generation(eb, eb 1900 fs/btrfs/relocation.c eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen, eb 1902 fs/btrfs/relocation.c if (IS_ERR(eb)) { eb 1903 fs/btrfs/relocation.c ret = PTR_ERR(eb); eb 1905 fs/btrfs/relocation.c } else if (!extent_buffer_uptodate(eb)) { eb 1907 fs/btrfs/relocation.c free_extent_buffer(eb); eb 1910 fs/btrfs/relocation.c btrfs_tree_lock(eb); eb 1912 fs/btrfs/relocation.c ret = btrfs_cow_block(trans, dest, eb, parent, eb 1913 fs/btrfs/relocation.c slot, &eb); eb 1916 fs/btrfs/relocation.c btrfs_set_lock_blocking_write(eb); eb 1921 fs/btrfs/relocation.c parent = eb; eb 2018 fs/btrfs/relocation.c struct extent_buffer *eb; eb 2031 fs/btrfs/relocation.c eb = path->nodes[i]; eb 2032 fs/btrfs/relocation.c nritems = btrfs_header_nritems(eb); eb 2035 fs/btrfs/relocation.c if (btrfs_node_ptr_generation(eb, path->slots[i]) <= eb 2056 fs/btrfs/relocation.c struct extent_buffer *eb = NULL; eb 2068 fs/btrfs/relocation.c eb = path->nodes[i]; eb 2069 fs/btrfs/relocation.c nritems = btrfs_header_nritems(eb); eb 2071 fs/btrfs/relocation.c ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]); eb 2087 fs/btrfs/relocation.c bytenr = btrfs_node_blockptr(eb, path->slots[i]); eb 2088 fs/btrfs/relocation.c btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]); eb 2089 fs/btrfs/relocation.c eb = 
read_tree_block(fs_info, bytenr, ptr_gen, i - 1, eb 2091 fs/btrfs/relocation.c if (IS_ERR(eb)) { eb 2092 fs/btrfs/relocation.c return PTR_ERR(eb); eb 2093 fs/btrfs/relocation.c } else if (!extent_buffer_uptodate(eb)) { eb 2094 fs/btrfs/relocation.c free_extent_buffer(eb); eb 2097 fs/btrfs/relocation.c BUG_ON(btrfs_header_level(eb) != i - 1); eb 2098 fs/btrfs/relocation.c path->nodes[i - 1] = eb; eb 2803 fs/btrfs/relocation.c struct extent_buffer *eb; eb 2811 fs/btrfs/relocation.c BUG_ON(lowest && node->eb); eb 2825 fs/btrfs/relocation.c if (upper->eb && !upper->locked) { eb 2827 fs/btrfs/relocation.c ret = btrfs_bin_search(upper->eb, key, eb 2834 fs/btrfs/relocation.c bytenr = btrfs_node_blockptr(upper->eb, slot); eb 2835 fs/btrfs/relocation.c if (node->eb->start == bytenr) eb 2841 fs/btrfs/relocation.c if (!upper->eb) { eb 2853 fs/btrfs/relocation.c if (!upper->eb) { eb 2854 fs/btrfs/relocation.c upper->eb = path->nodes[upper->level]; eb 2857 fs/btrfs/relocation.c BUG_ON(upper->eb != path->nodes[upper->level]); eb 2866 fs/btrfs/relocation.c ret = btrfs_bin_search(upper->eb, key, upper->level, eb 2875 fs/btrfs/relocation.c bytenr = btrfs_node_blockptr(upper->eb, slot); eb 2881 fs/btrfs/relocation.c upper->eb->start); eb 2886 fs/btrfs/relocation.c if (node->eb->start == bytenr) eb 2891 fs/btrfs/relocation.c generation = btrfs_node_ptr_generation(upper->eb, slot); eb 2892 fs/btrfs/relocation.c btrfs_node_key_to_cpu(upper->eb, &first_key, slot); eb 2893 fs/btrfs/relocation.c eb = read_tree_block(fs_info, bytenr, generation, eb 2895 fs/btrfs/relocation.c if (IS_ERR(eb)) { eb 2896 fs/btrfs/relocation.c err = PTR_ERR(eb); eb 2898 fs/btrfs/relocation.c } else if (!extent_buffer_uptodate(eb)) { eb 2899 fs/btrfs/relocation.c free_extent_buffer(eb); eb 2903 fs/btrfs/relocation.c btrfs_tree_lock(eb); eb 2904 fs/btrfs/relocation.c btrfs_set_lock_blocking_write(eb); eb 2906 fs/btrfs/relocation.c if (!node->eb) { eb 2907 fs/btrfs/relocation.c ret = btrfs_cow_block(trans, root, eb, upper->eb, eb 2908 fs/btrfs/relocation.c slot, &eb); eb 2909 fs/btrfs/relocation.c btrfs_tree_unlock(eb); eb 2910 fs/btrfs/relocation.c free_extent_buffer(eb); eb 2915 fs/btrfs/relocation.c BUG_ON(node->eb != eb); eb 2917 fs/btrfs/relocation.c btrfs_set_node_blockptr(upper->eb, slot, eb 2918 fs/btrfs/relocation.c node->eb->start); eb 2919 fs/btrfs/relocation.c btrfs_set_node_ptr_generation(upper->eb, slot, eb 2921 fs/btrfs/relocation.c btrfs_mark_buffer_dirty(upper->eb); eb 2924 fs/btrfs/relocation.c node->eb->start, blocksize, eb 2925 fs/btrfs/relocation.c upper->eb->start); eb 2928 fs/btrfs/relocation.c btrfs_header_owner(upper->eb)); eb 2932 fs/btrfs/relocation.c ret = btrfs_drop_subtree(trans, root, eb, upper->eb); eb 2962 fs/btrfs/relocation.c btrfs_node_key_to_cpu(node->eb, &key, 0); eb 3058 fs/btrfs/relocation.c struct extent_buffer *eb; eb 3061 fs/btrfs/relocation.c eb = read_tree_block(fs_info, block->bytenr, block->key.offset, eb 3063 fs/btrfs/relocation.c if (IS_ERR(eb)) { eb 3064 fs/btrfs/relocation.c return PTR_ERR(eb); eb 3065 fs/btrfs/relocation.c } else if (!extent_buffer_uptodate(eb)) { eb 3066 fs/btrfs/relocation.c free_extent_buffer(eb); eb 3070 fs/btrfs/relocation.c btrfs_item_key_to_cpu(eb, &block->key, 0); eb 3072 fs/btrfs/relocation.c btrfs_node_key_to_cpu(eb, &block->key, 0); eb 3073 fs/btrfs/relocation.c free_extent_buffer(eb); eb 3452 fs/btrfs/relocation.c struct extent_buffer *eb; eb 3461 fs/btrfs/relocation.c eb = path->nodes[0]; eb 3462 fs/btrfs/relocation.c item_size = btrfs_item_size_nr(eb, 
path->slots[0]); eb 3466 fs/btrfs/relocation.c ei = btrfs_item_ptr(eb, path->slots[0], eb 3470 fs/btrfs/relocation.c level = btrfs_tree_block_level(eb, bi); eb 3474 fs/btrfs/relocation.c generation = btrfs_extent_generation(eb, ei); eb 3476 fs/btrfs/relocation.c btrfs_print_v0_err(eb->fs_info); eb 3477 fs/btrfs/relocation.c btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL); eb 3581 fs/btrfs/relocation.c struct extent_buffer *eb) eb 3586 fs/btrfs/relocation.c if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) || eb 3587 fs/btrfs/relocation.c btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV) eb 3591 fs/btrfs/relocation.c eb->start, btrfs_header_level(eb), 1, eb 3823 fs/btrfs/relocation.c struct extent_buffer *eb; eb 3832 fs/btrfs/relocation.c eb = path->nodes[0]; eb 3833 fs/btrfs/relocation.c ptr = btrfs_item_ptr_offset(eb, path->slots[0]); eb 3834 fs/btrfs/relocation.c end = ptr + btrfs_item_size_nr(eb, path->slots[0]); eb 3839 fs/btrfs/relocation.c key.type = btrfs_get_extent_inline_ref_type(eb, iref, eb 3842 fs/btrfs/relocation.c key.offset = btrfs_extent_inline_ref_offset(eb, iref); eb 3848 fs/btrfs/relocation.c eb, dref, blocks); eb 3853 fs/btrfs/relocation.c eb->start, path->slots[0]); eb 3865 fs/btrfs/relocation.c eb = path->nodes[0]; eb 3866 fs/btrfs/relocation.c if (path->slots[0] >= btrfs_header_nritems(eb)) { eb 3874 fs/btrfs/relocation.c eb = path->nodes[0]; eb 3877 fs/btrfs/relocation.c btrfs_item_key_to_cpu(eb, &key, path->slots[0]); eb 3885 fs/btrfs/relocation.c dref = btrfs_item_ptr(eb, path->slots[0], eb 3888 fs/btrfs/relocation.c eb, dref, blocks); eb 3890 fs/btrfs/relocation.c btrfs_print_v0_err(eb->fs_info); eb 3891 fs/btrfs/relocation.c btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL); eb 4739 fs/btrfs/relocation.c node->eb = cow; eb 22 fs/btrfs/root-tree.c static void btrfs_read_root_item(struct extent_buffer *eb, int slot, eb 29 fs/btrfs/root-tree.c len = btrfs_item_size_nr(eb, slot); eb 30 fs/btrfs/root-tree.c read_extent_buffer(eb, item, btrfs_item_ptr_offset(eb, slot), eb 37 fs/btrfs/root-tree.c btrfs_warn(eb->fs_info, eb 644 fs/btrfs/scrub.c struct extent_buffer *eb; eb 675 fs/btrfs/scrub.c eb = swarn->path->nodes[0]; eb 676 fs/btrfs/scrub.c inode_item = btrfs_item_ptr(eb, swarn->path->slots[0], eb 678 fs/btrfs/scrub.c isize = btrfs_inode_size(eb, inode_item); eb 679 fs/btrfs/scrub.c nlink = btrfs_inode_nlink(eb, inode_item); eb 735 fs/btrfs/scrub.c struct extent_buffer *eb; eb 767 fs/btrfs/scrub.c eb = path->nodes[0]; eb 768 fs/btrfs/scrub.c ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); eb 769 fs/btrfs/scrub.c item_size = btrfs_item_size_nr(eb, path->slots[0]); eb 773 fs/btrfs/scrub.c ret = tree_backref_for_extent(&ptr, eb, &found_key, ei, eb 496 fs/btrfs/send.c struct extent_buffer *eb, eb 506 fs/btrfs/send.c read_extent_buffer(eb, prepared, off, len); eb 619 fs/btrfs/send.c struct extent_buffer *eb, eb 623 fs/btrfs/send.c read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts)); eb 665 fs/btrfs/send.c #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \ eb 667 fs/btrfs/send.c ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \ eb 893 fs/btrfs/send.c struct extent_buffer *eb = path->nodes[0]; eb 924 fs/btrfs/send.c ptr = (unsigned long)btrfs_item_ptr(eb, slot, eb 927 fs/btrfs/send.c total = btrfs_item_size(eb, item); eb 930 fs/btrfs/send.c ptr = btrfs_item_ptr_offset(eb, slot); eb 931 fs/btrfs/send.c total = btrfs_item_size_nr(eb, slot); eb 940 fs/btrfs/send.c name_len = btrfs_inode_ref_name_len(eb, iref); eb 942 
fs/btrfs/send.c index = btrfs_inode_ref_index(eb, iref); eb 946 fs/btrfs/send.c name_len = btrfs_inode_extref_name_len(eb, extref); eb 948 fs/btrfs/send.c index = btrfs_inode_extref_index(eb, extref); eb 949 fs/btrfs/send.c dir = btrfs_inode_extref_parent(eb, extref); eb 954 fs/btrfs/send.c name_off, eb, dir, eb 968 fs/btrfs/send.c eb, dir, eb 978 fs/btrfs/send.c ret = fs_path_add_from_extent_buffer(p, eb, name_off, eb 1013 fs/btrfs/send.c struct extent_buffer *eb; eb 1041 fs/btrfs/send.c eb = path->nodes[0]; eb 1044 fs/btrfs/send.c di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); eb 1047 fs/btrfs/send.c total = btrfs_item_size(eb, item); eb 1051 fs/btrfs/send.c name_len = btrfs_dir_name_len(eb, di); eb 1052 fs/btrfs/send.c data_len = btrfs_dir_data_len(eb, di); eb 1053 fs/btrfs/send.c type = btrfs_dir_type(eb, di); eb 1054 fs/btrfs/send.c btrfs_dir_item_key_to_cpu(eb, di, &di_key); eb 1098 fs/btrfs/send.c read_extent_buffer(eb, buf, (unsigned long)(di + 1), eb 1308 fs/btrfs/send.c struct extent_buffer *eb = path->nodes[0]; eb 1340 fs/btrfs/send.c fi = btrfs_item_ptr(eb, path->slots[0], eb 1342 fs/btrfs/send.c extent_type = btrfs_file_extent_type(eb, fi); eb 1347 fs/btrfs/send.c compressed = btrfs_file_extent_compression(eb, fi); eb 1349 fs/btrfs/send.c num_bytes = btrfs_file_extent_num_bytes(eb, fi); eb 1350 fs/btrfs/send.c disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); eb 1355 fs/btrfs/send.c logical = disk_byte + btrfs_file_extent_offset(eb, fi); eb 1412 fs/btrfs/send.c backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi); eb 2530 fs/btrfs/send.c struct extent_buffer *eb; eb 2555 fs/btrfs/send.c eb = path->nodes[0]; eb 2557 fs/btrfs/send.c ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); eb 2567 fs/btrfs/send.c TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); eb 2568 fs/btrfs/send.c TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); eb 2569 fs/btrfs/send.c TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime); eb 2678 fs/btrfs/send.c struct extent_buffer *eb; eb 2696 fs/btrfs/send.c eb = path->nodes[0]; eb 2698 fs/btrfs/send.c if (slot >= btrfs_header_nritems(eb)) { eb 2709 fs/btrfs/send.c btrfs_item_key_to_cpu(eb, &found_key, slot); eb 2716 fs/btrfs/send.c di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); eb 2717 fs/btrfs/send.c btrfs_dir_item_key_to_cpu(eb, di, &di_key); eb 4420 fs/btrfs/send.c struct extent_buffer *eb; eb 4450 fs/btrfs/send.c eb = path->nodes[0]; eb 4452 fs/btrfs/send.c if (slot >= btrfs_header_nritems(eb)) { eb 4461 fs/btrfs/send.c btrfs_item_key_to_cpu(eb, &found_key, slot); eb 4739 fs/btrfs/send.c struct extent_buffer *eb; eb 4756 fs/btrfs/send.c eb = path->nodes[0]; eb 4758 fs/btrfs/send.c if (slot >= btrfs_header_nritems(eb)) { eb 4769 fs/btrfs/send.c btrfs_item_key_to_cpu(eb, &found_key, slot); eb 5393 fs/btrfs/send.c struct extent_buffer *eb; eb 5413 fs/btrfs/send.c eb = left_path->nodes[0]; eb 5415 fs/btrfs/send.c ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); eb 5416 fs/btrfs/send.c left_type = btrfs_file_extent_type(eb, ei); eb 5422 fs/btrfs/send.c left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); eb 5423 fs/btrfs/send.c left_len = btrfs_file_extent_num_bytes(eb, ei); eb 5424 fs/btrfs/send.c left_offset = btrfs_file_extent_offset(eb, ei); eb 5425 fs/btrfs/send.c left_gen = btrfs_file_extent_generation(eb, ei); eb 5462 fs/btrfs/send.c eb = path->nodes[0]; eb 5464 fs/btrfs/send.c btrfs_item_key_to_cpu(eb, &found_key, slot); eb 5477 fs/btrfs/send.c ei = btrfs_item_ptr(eb, 
slot, struct btrfs_file_extent_item); eb 5478 fs/btrfs/send.c right_type = btrfs_file_extent_type(eb, ei); eb 5486 fs/btrfs/send.c right_len = btrfs_file_extent_ram_bytes(eb, ei); eb 5489 fs/btrfs/send.c right_len = btrfs_file_extent_num_bytes(eb, ei); eb 5515 fs/btrfs/send.c right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); eb 5516 fs/btrfs/send.c right_offset = btrfs_file_extent_offset(eb, ei); eb 5517 fs/btrfs/send.c right_gen = btrfs_file_extent_generation(eb, ei); eb 5545 fs/btrfs/send.c eb = path->nodes[0]; eb 5547 fs/btrfs/send.c btrfs_item_key_to_cpu(eb, &found_key, slot); eb 5820 fs/btrfs/send.c struct extent_buffer *eb; eb 5836 fs/btrfs/send.c eb = path->nodes[0]; eb 5839 fs/btrfs/send.c if (slot >= btrfs_header_nritems(eb)) { eb 5850 fs/btrfs/send.c btrfs_item_key_to_cpu(eb, &found_key, slot); eb 6070 fs/btrfs/send.c struct extent_buffer *eb = path->nodes[0]; eb 6073 fs/btrfs/send.c if (slot >= btrfs_header_nritems(eb)) { eb 6082 fs/btrfs/send.c btrfs_item_key_to_cpu(eb, &key, slot); eb 6511 fs/btrfs/send.c struct extent_buffer *eb; eb 6529 fs/btrfs/send.c eb = path->nodes[0]; eb 6531 fs/btrfs/send.c btrfs_item_key_to_cpu(eb, &key, slot); eb 6557 fs/btrfs/send.c struct extent_buffer *eb; eb 6560 fs/btrfs/send.c eb = btrfs_read_node_slot(path->nodes[*level], path->slots[*level]); eb 6561 fs/btrfs/send.c if (IS_ERR(eb)) eb 6562 fs/btrfs/send.c return PTR_ERR(eb); eb 6564 fs/btrfs/send.c path->nodes[*level - 1] = eb; eb 41 fs/btrfs/struct-funcs.c u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ eb 56 fs/btrfs/struct-funcs.c ASSERT(token->eb == eb); \ eb 65 fs/btrfs/struct-funcs.c err = map_private_extent_buffer(eb, offset, size, \ eb 70 fs/btrfs/struct-funcs.c read_extent_buffer(eb, &leres, offset, size); \ eb 79 fs/btrfs/struct-funcs.c u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ eb 92 fs/btrfs/struct-funcs.c err = map_private_extent_buffer(eb, offset, size, \ eb 97 fs/btrfs/struct-funcs.c read_extent_buffer(eb, &leres, offset, size); \ eb 104 fs/btrfs/struct-funcs.c void btrfs_set_token_##bits(struct extent_buffer *eb, \ eb 119 fs/btrfs/struct-funcs.c ASSERT(token->eb == eb); \ eb 128 fs/btrfs/struct-funcs.c err = map_private_extent_buffer(eb, offset, size, \ eb 134 fs/btrfs/struct-funcs.c write_extent_buffer(eb, &val2, offset, size); \ eb 142 fs/btrfs/struct-funcs.c void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ eb 154 fs/btrfs/struct-funcs.c err = map_private_extent_buffer(eb, offset, size, \ eb 160 fs/btrfs/struct-funcs.c write_extent_buffer(eb, &val2, offset, size); \ eb 172 fs/btrfs/struct-funcs.c void btrfs_node_key(const struct extent_buffer *eb, eb 176 fs/btrfs/struct-funcs.c read_eb_member(eb, (struct btrfs_key_ptr *)ptr, eb 164 fs/btrfs/tests/btrfs-tests.c struct extent_buffer *eb; eb 166 fs/btrfs/tests/btrfs-tests.c eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock); eb 167 fs/btrfs/tests/btrfs-tests.c if (!eb) eb 170 fs/btrfs/tests/btrfs-tests.c if (radix_tree_exception(eb)) { eb 171 fs/btrfs/tests/btrfs-tests.c if (radix_tree_deref_retry(eb)) eb 177 fs/btrfs/tests/btrfs-tests.c free_extent_buffer_stale(eb); eb 17 fs/btrfs/tests/extent-buffer-tests.c struct extent_buffer *eb; eb 51 fs/btrfs/tests/extent-buffer-tests.c path->nodes[0] = eb = alloc_dummy_extent_buffer(fs_info, nodesize); eb 52 fs/btrfs/tests/extent-buffer-tests.c if (!eb) { eb 66 fs/btrfs/tests/extent-buffer-tests.c write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0), eb 86 fs/btrfs/tests/extent-buffer-tests.c 
btrfs_item_key_to_cpu(eb, &key, 0); eb 95 fs/btrfs/tests/extent-buffer-tests.c if (btrfs_item_size(eb, item) != strlen(split1)) { eb 101 fs/btrfs/tests/extent-buffer-tests.c read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0), eb 111 fs/btrfs/tests/extent-buffer-tests.c btrfs_item_key_to_cpu(eb, &key, 1); eb 120 fs/btrfs/tests/extent-buffer-tests.c if (btrfs_item_size(eb, item) != strlen(split2)) { eb 126 fs/btrfs/tests/extent-buffer-tests.c read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1), eb 143 fs/btrfs/tests/extent-buffer-tests.c btrfs_item_key_to_cpu(eb, &key, 0); eb 152 fs/btrfs/tests/extent-buffer-tests.c if (btrfs_item_size(eb, item) != strlen(split3)) { eb 158 fs/btrfs/tests/extent-buffer-tests.c read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 0), eb 167 fs/btrfs/tests/extent-buffer-tests.c btrfs_item_key_to_cpu(eb, &key, 1); eb 176 fs/btrfs/tests/extent-buffer-tests.c if (btrfs_item_size(eb, item) != strlen(split4)) { eb 182 fs/btrfs/tests/extent-buffer-tests.c read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 1), eb 191 fs/btrfs/tests/extent-buffer-tests.c btrfs_item_key_to_cpu(eb, &key, 2); eb 200 fs/btrfs/tests/extent-buffer-tests.c if (btrfs_item_size(eb, item) != strlen(split2)) { eb 206 fs/btrfs/tests/extent-buffer-tests.c read_extent_buffer(eb, buf, btrfs_item_ptr_offset(eb, 2), eb 271 fs/btrfs/tests/extent-io-tests.c static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb, eb 280 fs/btrfs/tests/extent-io-tests.c bit1 = !!extent_buffer_test_bit(eb, 0, i); eb 286 fs/btrfs/tests/extent-io-tests.c bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE, eb 296 fs/btrfs/tests/extent-io-tests.c static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, eb 304 fs/btrfs/tests/extent-io-tests.c memzero_extent_buffer(eb, 0, len); eb 305 fs/btrfs/tests/extent-io-tests.c if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { eb 311 fs/btrfs/tests/extent-io-tests.c extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE); eb 312 fs/btrfs/tests/extent-io-tests.c ret = check_eb_bitmap(bitmap, eb, len); eb 319 fs/btrfs/tests/extent-io-tests.c extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE); eb 320 fs/btrfs/tests/extent-io-tests.c ret = check_eb_bitmap(bitmap, eb, len); eb 331 fs/btrfs/tests/extent-io-tests.c extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0, eb 333 fs/btrfs/tests/extent-io-tests.c ret = check_eb_bitmap(bitmap, eb, len); eb 343 fs/btrfs/tests/extent-io-tests.c extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE); eb 344 fs/btrfs/tests/extent-io-tests.c extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0, eb 346 fs/btrfs/tests/extent-io-tests.c ret = check_eb_bitmap(bitmap, eb, len); eb 359 fs/btrfs/tests/extent-io-tests.c extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE); eb 365 fs/btrfs/tests/extent-io-tests.c extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1); eb 370 fs/btrfs/tests/extent-io-tests.c ret = check_eb_bitmap(bitmap, eb, len); eb 384 fs/btrfs/tests/extent-io-tests.c struct extent_buffer *eb = NULL; eb 409 fs/btrfs/tests/extent-io-tests.c eb = __alloc_dummy_extent_buffer(fs_info, 0, len); eb 410 fs/btrfs/tests/extent-io-tests.c if (!eb) { eb 416 fs/btrfs/tests/extent-io-tests.c ret = __test_eb_bitmaps(bitmap, eb, len); eb 421 fs/btrfs/tests/extent-io-tests.c free_extent_buffer(eb); eb 422 fs/btrfs/tests/extent-io-tests.c eb = __alloc_dummy_extent_buffer(fs_info, nodesize / 2, len); eb 423 fs/btrfs/tests/extent-io-tests.c if (!eb) { eb 429 
fs/btrfs/tests/extent-io-tests.c ret = __test_eb_bitmaps(bitmap, eb, len); eb 431 fs/btrfs/tests/extent-io-tests.c free_extent_buffer(eb); eb 1127 fs/btrfs/transaction.c struct extent_buffer *eb; eb 1130 fs/btrfs/transaction.c eb = btrfs_lock_root_node(fs_info->tree_root); eb 1131 fs/btrfs/transaction.c ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, eb 1132 fs/btrfs/transaction.c 0, &eb); eb 1133 fs/btrfs/transaction.c btrfs_tree_unlock(eb); eb 1134 fs/btrfs/transaction.c free_extent_buffer(eb); eb 49 fs/btrfs/tree-checker.c static void generic_err(const struct extent_buffer *eb, int slot, eb 52 fs/btrfs/tree-checker.c const struct btrfs_fs_info *fs_info = eb->fs_info; eb 63 fs/btrfs/tree-checker.c btrfs_header_level(eb) == 0 ? "leaf" : "node", eb 64 fs/btrfs/tree-checker.c btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf); eb 74 fs/btrfs/tree-checker.c static void file_extent_err(const struct extent_buffer *eb, int slot, eb 77 fs/btrfs/tree-checker.c const struct btrfs_fs_info *fs_info = eb->fs_info; eb 82 fs/btrfs/tree-checker.c btrfs_item_key_to_cpu(eb, &key, slot); eb 90 fs/btrfs/tree-checker.c btrfs_header_level(eb) == 0 ? "leaf" : "node", eb 91 fs/btrfs/tree-checker.c btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, eb 293 fs/btrfs/tree-checker.c static void dir_item_err(const struct extent_buffer *eb, int slot, eb 296 fs/btrfs/tree-checker.c const struct btrfs_fs_info *fs_info = eb->fs_info; eb 301 fs/btrfs/tree-checker.c btrfs_item_key_to_cpu(eb, &key, slot); eb 309 fs/btrfs/tree-checker.c btrfs_header_level(eb) == 0 ? "leaf" : "node", eb 310 fs/btrfs/tree-checker.c btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, eb 427 fs/btrfs/tree-checker.c static void block_group_err(const struct extent_buffer *eb, int slot, eb 430 fs/btrfs/tree-checker.c const struct btrfs_fs_info *fs_info = eb->fs_info; eb 435 fs/btrfs/tree-checker.c btrfs_item_key_to_cpu(eb, &key, slot); eb 443 fs/btrfs/tree-checker.c btrfs_header_level(eb) == 0 ? "leaf" : "node", eb 444 fs/btrfs/tree-checker.c btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, eb 679 fs/btrfs/tree-checker.c static void dev_item_err(const struct extent_buffer *eb, int slot, eb 686 fs/btrfs/tree-checker.c btrfs_item_key_to_cpu(eb, &key, slot); eb 692 fs/btrfs/tree-checker.c btrfs_crit(eb->fs_info, eb 694 fs/btrfs/tree-checker.c btrfs_header_level(eb) == 0 ? "leaf" : "node", eb 695 fs/btrfs/tree-checker.c btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, eb 740 fs/btrfs/tree-checker.c #define inode_item_err(fs_info, eb, slot, fmt, ...) \ eb 741 fs/btrfs/tree-checker.c dir_item_err(eb, slot, fmt, __VA_ARGS__) eb 921 fs/btrfs/tree-checker.c static void extent_err(const struct extent_buffer *eb, int slot, eb 930 fs/btrfs/tree-checker.c btrfs_item_key_to_cpu(eb, &key, slot); eb 935 fs/btrfs/tree-checker.c len = eb->fs_info->nodesize; eb 943 fs/btrfs/tree-checker.c btrfs_crit(eb->fs_info, eb 945 fs/btrfs/tree-checker.c btrfs_header_level(eb) == 0 ? 
"leaf" : "node", eb 946 fs/btrfs/tree-checker.c eb->start, slot, bytenr, len, &vaf); eb 289 fs/btrfs/tree-log.c int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb, eb 297 fs/btrfs/tree-log.c struct extent_buffer *eb, eb 308 fs/btrfs/tree-log.c ret = btrfs_read_buffer(eb, gen, level, NULL); eb 314 fs/btrfs/tree-log.c ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start, eb 315 fs/btrfs/tree-log.c eb->len); eb 317 fs/btrfs/tree-log.c if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) { eb 318 fs/btrfs/tree-log.c if (wc->pin && btrfs_header_level(eb) == 0) eb 319 fs/btrfs/tree-log.c ret = btrfs_exclude_logged_extents(eb); eb 321 fs/btrfs/tree-log.c btrfs_write_tree_block(eb); eb 323 fs/btrfs/tree-log.c btrfs_wait_tree_block_writeback(eb); eb 345 fs/btrfs/tree-log.c struct extent_buffer *eb, int slot, eb 360 fs/btrfs/tree-log.c item_size = btrfs_item_size_nr(eb, slot); eb 361 fs/btrfs/tree-log.c src_ptr = btrfs_item_ptr_offset(eb, slot); eb 389 fs/btrfs/tree-log.c read_extent_buffer(eb, src_copy, src_ptr, item_size); eb 421 fs/btrfs/tree-log.c item = btrfs_item_ptr(eb, slot, eb 423 fs/btrfs/tree-log.c btrfs_set_inode_nbytes(eb, item, nbytes); eb 430 fs/btrfs/tree-log.c mode = btrfs_inode_mode(eb, item); eb 432 fs/btrfs/tree-log.c btrfs_set_inode_size(eb, item, 0); eb 442 fs/btrfs/tree-log.c item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); eb 443 fs/btrfs/tree-log.c btrfs_set_inode_nbytes(eb, item, 0); eb 450 fs/btrfs/tree-log.c mode = btrfs_inode_mode(eb, item); eb 452 fs/btrfs/tree-log.c btrfs_set_inode_size(eb, item, 0); eb 493 fs/btrfs/tree-log.c if (btrfs_inode_generation(eb, src_item) == 0) { eb 495 fs/btrfs/tree-log.c const u64 ino_size = btrfs_inode_size(eb, src_item); eb 504 fs/btrfs/tree-log.c if (S_ISREG(btrfs_inode_mode(eb, src_item)) && eb 517 fs/btrfs/tree-log.c S_ISDIR(btrfs_inode_mode(eb, src_item)) && eb 525 fs/btrfs/tree-log.c copy_extent_buffer(path->nodes[0], eb, dst_ptr, eb 583 fs/btrfs/tree-log.c struct extent_buffer *eb, int slot, eb 596 fs/btrfs/tree-log.c item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); eb 597 fs/btrfs/tree-log.c found_type = btrfs_file_extent_type(eb, item); eb 601 fs/btrfs/tree-log.c nbytes = btrfs_file_extent_num_bytes(eb, item); eb 608 fs/btrfs/tree-log.c if (btrfs_file_extent_disk_bytenr(eb, item) == 0) eb 611 fs/btrfs/tree-log.c size = btrfs_file_extent_ram_bytes(eb, item); eb 612 fs/btrfs/tree-log.c nbytes = btrfs_file_extent_ram_bytes(eb, item); eb 646 fs/btrfs/tree-log.c read_extent_buffer(eb, &cmp1, (unsigned long)item, eb 673 fs/btrfs/tree-log.c if (btrfs_file_extent_disk_bytenr(eb, item) == 0 && eb 683 fs/btrfs/tree-log.c copy_extent_buffer(path->nodes[0], eb, dest_offset, eb 686 fs/btrfs/tree-log.c ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); eb 687 fs/btrfs/tree-log.c ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); eb 689 fs/btrfs/tree-log.c offset = key->offset - btrfs_file_extent_offset(eb, item); eb 700 fs/btrfs/tree-log.c btrfs_file_extent_disk_bytenr(eb, item), eb 701 fs/btrfs/tree-log.c btrfs_file_extent_disk_num_bytes(eb, item), eb 741 fs/btrfs/tree-log.c if (btrfs_file_extent_compression(eb, item)) { eb 746 fs/btrfs/tree-log.c btrfs_file_extent_offset(eb, item); eb 748 fs/btrfs/tree-log.c btrfs_file_extent_num_bytes(eb, item); eb 828 fs/btrfs/tree-log.c ret = overwrite_item(trans, root, path, eb, slot, key); eb 1177 fs/btrfs/tree-log.c static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, eb 1185 fs/btrfs/tree-log.c *namelen = 
btrfs_inode_extref_name_len(eb, extref); eb 1190 fs/btrfs/tree-log.c read_extent_buffer(eb, *name, (unsigned long)&extref->name, eb 1194 fs/btrfs/tree-log.c *index = btrfs_inode_extref_index(eb, extref); eb 1196 fs/btrfs/tree-log.c *parent_objectid = btrfs_inode_extref_parent(eb, extref); eb 1201 fs/btrfs/tree-log.c static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, eb 1208 fs/btrfs/tree-log.c *namelen = btrfs_inode_ref_name_len(eb, ref); eb 1213 fs/btrfs/tree-log.c read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); eb 1216 fs/btrfs/tree-log.c *index = btrfs_inode_ref_index(eb, ref); eb 1239 fs/btrfs/tree-log.c struct extent_buffer *eb; eb 1251 fs/btrfs/tree-log.c eb = path->nodes[0]; eb 1252 fs/btrfs/tree-log.c ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]); eb 1253 fs/btrfs/tree-log.c ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]); eb 1260 fs/btrfs/tree-log.c ret = extref_get_fields(eb, ref_ptr, &namelen, &name, eb 1264 fs/btrfs/tree-log.c ret = ref_get_fields(eb, ref_ptr, &namelen, &name, eb 1420 fs/btrfs/tree-log.c struct extent_buffer *eb, int slot, eb 1437 fs/btrfs/tree-log.c ref_ptr = btrfs_item_ptr_offset(eb, slot); eb 1438 fs/btrfs/tree-log.c ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); eb 1446 fs/btrfs/tree-log.c parent_objectid = btrfs_inode_extref_parent(eb, r); eb 1473 fs/btrfs/tree-log.c ret = extref_get_fields(eb, ref_ptr, &namelen, &name, eb 1486 fs/btrfs/tree-log.c ret = ref_get_fields(eb, ref_ptr, &namelen, &name, eb 1571 fs/btrfs/tree-log.c ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot, eb 1577 fs/btrfs/tree-log.c ret = overwrite_item(trans, root, path, eb, slot, key); eb 1931 fs/btrfs/tree-log.c struct extent_buffer *eb, eb 1951 fs/btrfs/tree-log.c name_len = btrfs_dir_name_len(eb, di); eb 1958 fs/btrfs/tree-log.c log_type = btrfs_dir_type(eb, di); eb 1959 fs/btrfs/tree-log.c read_extent_buffer(eb, name, (unsigned long)(di + 1), eb 1962 fs/btrfs/tree-log.c btrfs_dir_item_key_to_cpu(eb, di, &log_key); eb 2056 fs/btrfs/tree-log.c struct extent_buffer *eb, int slot, eb 2060 fs/btrfs/tree-log.c u32 item_size = btrfs_item_size_nr(eb, slot); eb 2067 fs/btrfs/tree-log.c ptr = btrfs_item_ptr_offset(eb, slot); eb 2071 fs/btrfs/tree-log.c name_len = btrfs_dir_name_len(eb, di); eb 2072 fs/btrfs/tree-log.c ret = replay_one_name(trans, root, path, eb, di, key); eb 2105 fs/btrfs/tree-log.c if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) { eb 2116 fs/btrfs/tree-log.c btrfs_dir_item_key_to_cpu(eb, di, &di_key); eb 2224 fs/btrfs/tree-log.c struct extent_buffer *eb; eb 2237 fs/btrfs/tree-log.c eb = path->nodes[0]; eb 2239 fs/btrfs/tree-log.c item_size = btrfs_item_size_nr(eb, slot); eb 2240 fs/btrfs/tree-log.c ptr = btrfs_item_ptr_offset(eb, slot); eb 2244 fs/btrfs/tree-log.c name_len = btrfs_dir_name_len(eb, di); eb 2250 fs/btrfs/tree-log.c read_extent_buffer(eb, name, (unsigned long)(di + 1), eb 2265 fs/btrfs/tree-log.c btrfs_dir_item_key_to_cpu(eb, di, &location); eb 2535 fs/btrfs/tree-log.c static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, eb 2545 fs/btrfs/tree-log.c ret = btrfs_read_buffer(eb, gen, level, NULL); eb 2549 fs/btrfs/tree-log.c level = btrfs_header_level(eb); eb 2558 fs/btrfs/tree-log.c nritems = btrfs_header_nritems(eb); eb 2560 fs/btrfs/tree-log.c btrfs_item_key_to_cpu(eb, &key, i); eb 2568 fs/btrfs/tree-log.c inode_item = btrfs_item_ptr(eb, i, eb 2578 fs/btrfs/tree-log.c if (btrfs_inode_nlink(eb, inode_item) == 0) { eb 2588 fs/btrfs/tree-log.c 
mode = btrfs_inode_mode(eb, inode_item); eb 2596 fs/btrfs/tree-log.c eb, i, &key); eb 2643 fs/btrfs/tree-log.c eb, i, &key); eb 2654 fs/btrfs/tree-log.c eb, i, &key); eb 2660 fs/btrfs/tree-log.c eb, i, &key); eb 2666 fs/btrfs/tree-log.c eb, i, &key); eb 2671 fs/btrfs/tree-log.c eb, i, &key); eb 4735 fs/btrfs/tree-log.c static int btrfs_check_ref_name_override(struct extent_buffer *eb, eb 4745 fs/btrfs/tree-log.c u32 item_size = btrfs_item_size_nr(eb, slot); eb 4747 fs/btrfs/tree-log.c unsigned long ptr = btrfs_item_ptr_offset(eb, slot); eb 4767 fs/btrfs/tree-log.c this_name_len = btrfs_inode_ref_name_len(eb, iref); eb 4775 fs/btrfs/tree-log.c parent = btrfs_inode_extref_parent(eb, extref); eb 4776 fs/btrfs/tree-log.c this_name_len = btrfs_inode_extref_name_len(eb, extref); eb 4793 fs/btrfs/tree-log.c read_extent_buffer(eb, name, name_ptr, this_name_len); eb 27 fs/btrfs/uuid-tree.c struct extent_buffer *eb; eb 53 fs/btrfs/uuid-tree.c eb = path->nodes[0]; eb 55 fs/btrfs/uuid-tree.c item_size = btrfs_item_size_nr(eb, slot); eb 56 fs/btrfs/uuid-tree.c offset = btrfs_item_ptr_offset(eb, slot); eb 68 fs/btrfs/uuid-tree.c read_extent_buffer(eb, &data, offset, sizeof(data)); eb 90 fs/btrfs/uuid-tree.c struct extent_buffer *eb; eb 116 fs/btrfs/uuid-tree.c eb = path->nodes[0]; eb 118 fs/btrfs/uuid-tree.c offset = btrfs_item_ptr_offset(eb, slot); eb 125 fs/btrfs/uuid-tree.c eb = path->nodes[0]; eb 127 fs/btrfs/uuid-tree.c offset = btrfs_item_ptr_offset(eb, slot); eb 128 fs/btrfs/uuid-tree.c offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le); eb 139 fs/btrfs/uuid-tree.c write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le)); eb 140 fs/btrfs/uuid-tree.c btrfs_mark_buffer_dirty(eb); eb 155 fs/btrfs/uuid-tree.c struct extent_buffer *eb; eb 187 fs/btrfs/uuid-tree.c eb = path->nodes[0]; eb 189 fs/btrfs/uuid-tree.c offset = btrfs_item_ptr_offset(eb, slot); eb 190 fs/btrfs/uuid-tree.c item_size = btrfs_item_size_nr(eb, slot); eb 200 fs/btrfs/uuid-tree.c read_extent_buffer(eb, &read_subid, offset, sizeof(read_subid)); eb 212 fs/btrfs/uuid-tree.c item_size = btrfs_item_size_nr(eb, slot); eb 220 fs/btrfs/uuid-tree.c move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot)); eb 221 fs/btrfs/uuid-tree.c memmove_extent_buffer(eb, move_dst, move_src, move_len); eb 4466 fs/btrfs/volumes.c struct extent_buffer *eb; eb 4497 fs/btrfs/volumes.c eb = path->nodes[0]; eb 4499 fs/btrfs/volumes.c item_size = btrfs_item_size_nr(eb, slot); eb 4503 fs/btrfs/volumes.c read_extent_buffer(eb, &root_item, eb 4504 fs/btrfs/volumes.c btrfs_item_ptr_offset(eb, slot), eb 7347 fs/btrfs/volumes.c static u64 btrfs_dev_stats_value(const struct extent_buffer *eb, eb 7353 fs/btrfs/volumes.c read_extent_buffer(eb, &val, eb 7360 fs/btrfs/volumes.c static void btrfs_set_dev_stats_value(struct extent_buffer *eb, eb 7364 fs/btrfs/volumes.c write_extent_buffer(eb, &val, eb 7375 fs/btrfs/volumes.c struct extent_buffer *eb; eb 7403 fs/btrfs/volumes.c eb = path->nodes[0]; eb 7404 fs/btrfs/volumes.c item_size = btrfs_item_size_nr(eb, slot); eb 7406 fs/btrfs/volumes.c ptr = btrfs_item_ptr(eb, slot, eb 7412 fs/btrfs/volumes.c btrfs_dev_stats_value(eb, ptr, i)); eb 7434 fs/btrfs/volumes.c struct extent_buffer *eb; eb 7480 fs/btrfs/volumes.c eb = path->nodes[0]; eb 7481 fs/btrfs/volumes.c ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item); eb 7483 fs/btrfs/volumes.c btrfs_set_dev_stats_value(eb, ptr, i, eb 7485 fs/btrfs/volumes.c btrfs_mark_buffer_dirty(eb); eb 565 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb); 
eb 668 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data; eb 679 fs/ocfs2/alloc.c path->p_node[index].el = &eb->h_list; eb 882 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb = eb 894 fs/ocfs2/alloc.c rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &eb->h_check); eb 905 fs/ocfs2/alloc.c if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { eb 909 fs/ocfs2/alloc.c eb->h_signature); eb 913 fs/ocfs2/alloc.c if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) { eb 917 fs/ocfs2/alloc.c (unsigned long long)le64_to_cpu(eb->h_blkno)); eb 921 fs/ocfs2/alloc.c if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation) eb 925 fs/ocfs2/alloc.c le32_to_cpu(eb->h_fs_generation)); eb 954 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 968 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) eb_bh->b_data; eb 969 fs/ocfs2/alloc.c el = &eb->h_list; eb 999 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 1033 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) bhs[i]->b_data; eb 1035 fs/ocfs2/alloc.c strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); eb 1036 fs/ocfs2/alloc.c eb->h_blkno = cpu_to_le64(first_blkno); eb 1037 fs/ocfs2/alloc.c eb->h_fs_generation = cpu_to_le32(osb->fs_generation); eb 1038 fs/ocfs2/alloc.c eb->h_suballoc_slot = eb 1040 fs/ocfs2/alloc.c eb->h_suballoc_loc = cpu_to_le64(suballoc_loc); eb 1041 fs/ocfs2/alloc.c eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start); eb 1042 fs/ocfs2/alloc.c eb->h_list.l_count = eb 1157 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 1165 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) eb_bh->b_data; eb 1166 fs/ocfs2/alloc.c el = &eb->h_list; eb 1175 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data; eb 1176 fs/ocfs2/alloc.c new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list); eb 1245 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) bh->b_data; eb 1247 fs/ocfs2/alloc.c BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); eb 1248 fs/ocfs2/alloc.c eb_el = &eb->h_list; eb 1257 fs/ocfs2/alloc.c eb->h_next_leaf_blk = 0; eb 1273 fs/ocfs2/alloc.c new_last_eb_blk = le64_to_cpu(eb->h_blkno); eb 1276 fs/ocfs2/alloc.c next_blkno = le64_to_cpu(eb->h_blkno); eb 1318 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data; eb 1319 fs/ocfs2/alloc.c eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk); eb 1358 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 1379 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) new_eb_bh->b_data; eb 1381 fs/ocfs2/alloc.c BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb)); eb 1383 fs/ocfs2/alloc.c eb_el = &eb->h_list; eb 1413 fs/ocfs2/alloc.c root_el->l_recs[0].e_blkno = eb->h_blkno; eb 1422 fs/ocfs2/alloc.c ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); eb 1457 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 1491 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) bh->b_data; eb 1492 fs/ocfs2/alloc.c el = &eb->h_list; eb 1796 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 1844 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) bh->b_data; eb 1845 fs/ocfs2/alloc.c el = &eb->h_list; eb 1908 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb =(struct ocfs2_extent_block *)bh->b_data; eb 1909 fs/ocfs2/alloc.c struct ocfs2_extent_list *el = &eb->h_list; eb 2531 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 2541 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; eb 2542 fs/ocfs2/alloc.c BUG_ON(eb->h_next_leaf_blk != 0ULL); eb 2544 fs/ocfs2/alloc.c el = &eb->h_list; eb 2570 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 2577 
fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)bh->b_data; eb 2582 fs/ocfs2/alloc.c el = &eb->h_list; eb 2588 fs/ocfs2/alloc.c (unsigned long long)le64_to_cpu(eb->h_blkno), eb 2601 fs/ocfs2/alloc.c ret = ocfs2_cache_extent_block_free(dealloc, eb); eb 2619 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 2621 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data; eb 2624 fs/ocfs2/alloc.c if (root_el->l_recs[i].e_blkno == eb->h_blkno) eb 2632 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; eb 2633 fs/ocfs2/alloc.c eb->h_next_leaf_blk = 0; eb 2653 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 2665 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data; eb 2680 fs/ocfs2/alloc.c if (eb->h_next_leaf_blk != 0ULL) eb 2697 fs/ocfs2/alloc.c if (eb->h_next_leaf_blk == 0ULL && eb 2753 fs/ocfs2/alloc.c if (eb->h_next_leaf_blk == 0ULL) { eb 2775 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; eb 2776 fs/ocfs2/alloc.c ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); eb 3025 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 3079 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data; eb 3080 fs/ocfs2/alloc.c ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno)); eb 3153 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 3185 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; eb 3186 fs/ocfs2/alloc.c el = &eb->h_list; eb 3187 fs/ocfs2/alloc.c if (eb->h_next_leaf_blk == 0) { eb 3200 fs/ocfs2/alloc.c (unsigned long long)le64_to_cpu(eb->h_blkno)); eb 4364 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 4392 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)bh->b_data; eb 4395 fs/ocfs2/alloc.c (unsigned long long)le64_to_cpu(eb->h_blkno), eb 4446 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)bh->b_data; eb 4449 fs/ocfs2/alloc.c (unsigned long long)le64_to_cpu(eb->h_blkno), eb 4579 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 4603 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) bh->b_data; eb 4604 fs/ocfs2/alloc.c el = &eb->h_list; eb 4919 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 4933 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data; eb 4934 fs/ocfs2/alloc.c rightmost_el = &eb->h_list; eb 5281 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 5305 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; eb 5306 fs/ocfs2/alloc.c rightmost_el = &eb->h_list; eb 5356 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 5385 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data; eb 5386 fs/ocfs2/alloc.c if (eb->h_next_leaf_blk == 0) eb 6661 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb; eb 6713 fs/ocfs2/alloc.c eb = (struct ocfs2_extent_block *) new_eb_bh[i]->b_data; eb 6718 fs/ocfs2/alloc.c strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE); eb 6719 fs/ocfs2/alloc.c eb->h_blkno = cpu_to_le64(bf->free_blk); eb 6720 fs/ocfs2/alloc.c eb->h_fs_generation = cpu_to_le32(osb->fs_generation); eb 6721 fs/ocfs2/alloc.c eb->h_suballoc_slot = cpu_to_le16(real_slot); eb 6722 fs/ocfs2/alloc.c eb->h_suballoc_loc = cpu_to_le64(bf->free_bg); eb 6723 fs/ocfs2/alloc.c eb->h_suballoc_bit = cpu_to_le16(bf->free_bit); eb 6724 fs/ocfs2/alloc.c eb->h_list.l_count = eb 6789 fs/ocfs2/alloc.c struct ocfs2_extent_block *eb) eb 6792 fs/ocfs2/alloc.c le16_to_cpu(eb->h_suballoc_slot), eb 6793 fs/ocfs2/alloc.c 
le64_to_cpu(eb->h_suballoc_loc), eb 6794 fs/ocfs2/alloc.c le64_to_cpu(eb->h_blkno), eb 6795 fs/ocfs2/alloc.c le16_to_cpu(eb->h_suballoc_bit)); eb 774 fs/ocfs2/dir.c struct ocfs2_extent_block *eb; eb 785 fs/ocfs2/dir.c eb = (struct ocfs2_extent_block *) eb_bh->b_data; eb 786 fs/ocfs2/dir.c el = &eb->h_list; eb 282 fs/ocfs2/extent_map.c struct ocfs2_extent_block *eb; eb 291 fs/ocfs2/extent_map.c eb = (struct ocfs2_extent_block *) eb_bh->b_data; eb 292 fs/ocfs2/extent_map.c el = &eb->h_list; eb 353 fs/ocfs2/extent_map.c struct ocfs2_extent_block *eb, *next_eb; eb 358 fs/ocfs2/extent_map.c eb = (struct ocfs2_extent_block *)eb_bh->b_data; eb 364 fs/ocfs2/extent_map.c if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL) eb 368 fs/ocfs2/extent_map.c le64_to_cpu(eb->h_next_leaf_blk), eb 406 fs/ocfs2/extent_map.c struct ocfs2_extent_block *uninitialized_var(eb); eb 427 fs/ocfs2/extent_map.c eb = (struct ocfs2_extent_block *) eb_bh->b_data; eb 428 fs/ocfs2/extent_map.c el = &eb->h_list; eb 496 fs/ocfs2/extent_map.c else if (eb->h_blkno == di->i_last_eb_blk) eb 498 fs/ocfs2/extent_map.c else if (eb->h_next_leaf_blk == di->i_last_eb_blk) { eb 539 fs/ocfs2/extent_map.c struct ocfs2_extent_block *eb; eb 551 fs/ocfs2/extent_map.c eb = (struct ocfs2_extent_block *) eb_bh->b_data; eb 552 fs/ocfs2/extent_map.c el = &eb->h_list; eb 958 fs/ocfs2/refcounttree.c struct ocfs2_extent_block *eb, eb 979 fs/ocfs2/refcounttree.c if (!eb || (eb && !eb->h_next_leaf_blk)) { eb 1003 fs/ocfs2/refcounttree.c cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos); eb 1067 fs/ocfs2/refcounttree.c struct ocfs2_extent_block *eb = NULL; eb 1091 fs/ocfs2/refcounttree.c eb = (struct ocfs2_extent_block *) eb_bh->b_data; eb 1092 fs/ocfs2/refcounttree.c el = &eb->h_list; eb 1115 fs/ocfs2/refcounttree.c eb, el, i, &cpos_end); eb 2638 fs/ocfs2/refcounttree.c struct ocfs2_extent_block *eb = NULL; eb 2653 fs/ocfs2/refcounttree.c eb = (struct ocfs2_extent_block *) eb_bh->b_data; eb 2654 fs/ocfs2/refcounttree.c el = &eb->h_list; eb 2802 fs/ocfs2/refcounttree.c eb && eb->h_next_leaf_blk) { eb 2807 fs/ocfs2/refcounttree.c le64_to_cpu(eb->h_next_leaf_blk), eb 2814 fs/ocfs2/refcounttree.c eb = (struct ocfs2_extent_block *) eb_bh->b_data; eb 2815 fs/ocfs2/refcounttree.c el = &eb->h_list; eb 3694 fs/ocfs2/xattr.c struct ocfs2_extent_block *eb; eb 3706 fs/ocfs2/xattr.c eb = (struct ocfs2_extent_block *) eb_bh->b_data; eb 3707 fs/ocfs2/xattr.c el = &eb->h_list; eb 390 include/linux/ceph/osd_client.h void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb); eb 257 include/media/v4l2-mem2mem.h struct v4l2_exportbuffer *eb); eb 660 include/media/v4l2-mem2mem.h struct v4l2_exportbuffer *eb); eb 156 include/media/videobuf2-v4l2.h int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb); eb 2012 include/trace/events/btrfs.h TP_PROTO(const struct extent_buffer *eb, u64 start_ns), eb 2014 include/trace/events/btrfs.h TP_ARGS(eb, start_ns), eb 2026 include/trace/events/btrfs.h TP_fast_assign_btrfs(eb->fs_info, eb 2027 include/trace/events/btrfs.h __entry->block = eb->start; eb 2028 include/trace/events/btrfs.h __entry->generation = btrfs_header_generation(eb); eb 2032 include/trace/events/btrfs.h __entry->owner = btrfs_header_owner(eb); eb 2033 include/trace/events/btrfs.h __entry->is_log_tree = (eb->log_index >= 0); eb 2044 include/trace/events/btrfs.h TP_PROTO(const struct extent_buffer *eb, u64 start_ns), eb 2046 include/trace/events/btrfs.h TP_ARGS(eb, start_ns) eb 2050 include/trace/events/btrfs.h TP_PROTO(const struct extent_buffer 
*eb, u64 start_ns), eb 2052 include/trace/events/btrfs.h TP_ARGS(eb, start_ns) eb 2056 include/trace/events/btrfs.h TP_PROTO(const struct extent_buffer *eb), eb 2058 include/trace/events/btrfs.h TP_ARGS(eb), eb 2067 include/trace/events/btrfs.h TP_fast_assign_btrfs(eb->fs_info, eb 2068 include/trace/events/btrfs.h __entry->block = eb->start; eb 2069 include/trace/events/btrfs.h __entry->generation = btrfs_header_generation(eb); eb 2070 include/trace/events/btrfs.h __entry->owner = btrfs_header_owner(eb); eb 2071 include/trace/events/btrfs.h __entry->is_log_tree = (eb->log_index >= 0); eb 2081 include/trace/events/btrfs.h TP_PROTO(const struct extent_buffer *eb), \ eb 2083 include/trace/events/btrfs.h TP_ARGS(eb) \ eb 2503 net/ceph/osd_client.c static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) eb 2505 net/ceph/osd_client.c if (likely(eb > osdc->epoch_barrier)) { eb 2507 net/ceph/osd_client.c osdc->epoch_barrier, eb); eb 2508 net/ceph/osd_client.c osdc->epoch_barrier = eb; eb 2510 net/ceph/osd_client.c if (eb > osdc->osdmap->epoch) eb 2515 net/ceph/osd_client.c void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb) eb 2518 net/ceph/osd_client.c if (unlikely(eb > osdc->epoch_barrier)) { eb 2521 net/ceph/osd_client.c update_epoch_barrier(osdc, eb); eb 5703 tools/lib/traceevent/event-parse.c struct tep_event * const * eb = b; eb 5705 tools/lib/traceevent/event-parse.c if ((*ea)->id < (*eb)->id) eb 5708 tools/lib/traceevent/event-parse.c if ((*ea)->id > (*eb)->id) eb 5717 tools/lib/traceevent/event-parse.c struct tep_event * const * eb = b; eb 5720 tools/lib/traceevent/event-parse.c res = strcmp((*ea)->name, (*eb)->name); eb 5724 tools/lib/traceevent/event-parse.c res = strcmp((*ea)->system, (*eb)->system); eb 5734 tools/lib/traceevent/event-parse.c struct tep_event * const * eb = b; eb 5737 tools/lib/traceevent/event-parse.c res = strcmp((*ea)->system, (*eb)->system); eb 5741 tools/lib/traceevent/event-parse.c res = strcmp((*ea)->name, (*eb)->name); eb 99 tools/lib/traceevent/parse-filter.c const struct tep_filter_type *eb = b; eb 101 tools/lib/traceevent/parse-filter.c if (ea->event_id < eb->event_id) eb 104 tools/lib/traceevent/parse-filter.c if (ea->event_id > eb->event_id)
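
The occurrences above cluster around a few recurring kernel idioms; the sketches below illustrate them in self-contained userspace C. First, the fs/btrfs/locking.c entries (btrfs_try_tree_read_lock, btrfs_try_tree_write_lock) all follow the same check / trylock / recheck shape: bail cheaply if a blocking writer is visible, take the rwlock optimistically, then re-test under the lock. A minimal sketch of that idiom using pthreads instead of the kernel rwlock; struct tree_buf and its fields are illustrative stand-ins, not the kernel's extent_buffer.

/* Userspace sketch of the check/trylock/recheck idiom from the
 * btrfs_try_tree_read_lock() entries above; all names illustrative. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct tree_buf {
        pthread_rwlock_t lock;
        atomic_int blocking_writers;    /* writers gone "blocking" */
};

static bool try_tree_read_lock(struct tree_buf *tb)
{
        /* Cheap unlocked check first, as at locking.c line 188. */
        if (atomic_load(&tb->blocking_writers))
                return false;

        if (pthread_rwlock_tryrdlock(&tb->lock) != 0)
                return false;

        /* Recheck under the lock: a writer may have gone blocking
         * between the first check and the trylock (lines 194-197). */
        if (atomic_load(&tb->blocking_writers)) {
                pthread_rwlock_unlock(&tb->lock);
                return false;
        }
        return true;
}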
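
The qgroup.c, relocation.c, and ref-verify.c entries repeat one error-handling shape around read_tree_block(): hard failures come back as an ERR_PTR-encoded pointer, while "read fine but failed verification" is a separate !extent_buffer_uptodate() case that must free the buffer before reporting corruption. A sketch under simplified assumptions; the ERR_PTR helpers and struct eb_sketch below are stand-ins for the kernel's versions.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)    ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct eb_sketch { int uptodate; };

/* Stand-in for read_tree_block(): errors encoded in the pointer,
 * verification failure signalled separately via !uptodate. */
static struct eb_sketch *read_tree_block_stub(int io_fail, int stale)
{
        if (io_fail)
                return ERR_PTR(-EIO);
        struct eb_sketch *eb = calloc(1, sizeof(*eb));
        eb->uptodate = !stale;
        return eb;
}

static int walk_child(int io_fail, int stale)
{
        struct eb_sketch *eb = read_tree_block_stub(io_fail, stale);

        if (IS_ERR(eb))
                return (int)PTR_ERR(eb);  /* propagate -EIO etc. */
        if (!eb->uptodate) {
                free(eb);                 /* drop the buffer first ... */
                return -EIO;              /* ... then report corruption */
        }
        /* ... use eb, then release it ... */
        free(eb);
        return 0;
}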
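
The fs/btrfs/struct-funcs.c entries show accessors being stamped out per integer width with a token-pasting macro (btrfs_get_##bits / btrfs_set_##bits), each doing an unaligned little-endian load or store. A hedged sketch of that generation trick over a flat byte array; the real code additionally maps extent-buffer pages (map_private_extent_buffer) and caches the mapping in a token, which is omitted here.

#include <stdint.h>

/* One macro stamps out a little-endian getter/setter pair per width. */
#define DEFINE_EB_ACCESSORS(bits)                                   \
static uint##bits##_t eb_get_##bits(const uint8_t *buf,             \
                                    unsigned long off)              \
{                                                                   \
        uint##bits##_t v = 0;                                       \
        for (unsigned int i = 0; i < bits / 8; i++)                 \
                v |= (uint##bits##_t)buf[off + i] << (8 * i);       \
        return v;                                                   \
}                                                                   \
static void eb_set_##bits(uint8_t *buf, unsigned long off,          \
                          uint##bits##_t val)                       \
{                                                                   \
        for (unsigned int i = 0; i < bits / 8; i++)                 \
                buf[off + i] = (uint8_t)(val >> (8 * i));           \
}

DEFINE_EB_ACCESSORS(16)
DEFINE_EB_ACCESSORS(32)
DEFINE_EB_ACCESSORS(64)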
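
The TLV_PUT_BTRFS_TIMESPEC entry from fs/btrfs/send.c wraps an emit call in a macro so a long run of puts can share one error exit. The jump target is not visible in the listing, so the shape below is an assumption about that macro family: a do/while(0) wrapper that bails to a shared tlv_put_failure label; tlv_put_u64() is a hypothetical stand-in emitter.

#include <stdio.h>

/* Hypothetical stand-in for the tlv_put_* emitters in send.c. */
static int tlv_put_u64(int attrtype, unsigned long long val)
{
        printf("attr %d = %llu\n", attrtype, val);
        return 0;               /* negative errno on failure */
}

/* Assumed shape of the TLV_PUT_* macros: emit, then bail to a
 * shared label so the call sites stay flat. */
#define TLV_PUT_U64(attrtype, val)                      \
        do {                                            \
                ret = tlv_put_u64(attrtype, val);       \
                if (ret < 0)                            \
                        goto tlv_put_failure;           \
        } while (0)

static int send_sketch(void)
{
        int ret;

        TLV_PUT_U64(1, 4096);   /* e.g. a size attribute */
        TLV_PUT_U64(2, 0644);   /* e.g. a mode attribute */
        ret = 0;
tlv_put_failure:
        return ret;
}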
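
The fs/btrfs/tree-checker.c entries (generic_err, file_extent_err, dir_item_err, ...) funnel every corruption message through a varargs reporter that prefixes the location (leaf or node, owner, bytenr, slot), so individual checks only format the detail. A simplified sketch of that reporter; the kernel version routes through btrfs_crit() with a struct va_format rather than stderr.

#include <stdarg.h>
#include <stdio.h>

/* Varargs reporter in the shape of generic_err(): the caller only
 * formats the detail, the helper supplies the location prefix. */
static void generic_err_sketch(int level, unsigned long long owner,
                               unsigned long long bytenr, int slot,
                               const char *fmt, ...)
{
        va_list args;

        fprintf(stderr, "corrupt %s: root=%llu block=%llu slot=%d, ",
                level == 0 ? "leaf" : "node", owner, bytenr, slot);
        va_start(args, fmt);
        vfprintf(stderr, fmt, args);
        va_end(args);
        fputc('\n', stderr);
}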
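
The fs/ocfs2/alloc.c validation entries check three things on a freshly read extent block: the on-disk signature, that h_blkno points back at the block it was read from, and that h_fs_generation matches the mounted filesystem. A sketch with an abbreviated struct; the real fields are little-endian on disk (the listing shows le64_to_cpu/le32_to_cpu), which this host-endian sketch glosses over.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OCFS2_EXTENT_BLOCK_SIGNATURE "EXBLK01"

/* Abbreviated extent-block header, host-endian for the sketch. */
struct eb_hdr_sketch {
        char     h_signature[8];
        uint64_t h_blkno;          /* must point back at itself */
        uint32_t h_fs_generation;  /* must match the mounted fs */
};

static int validate_extent_block(const struct eb_hdr_sketch *eb,
                                 uint64_t blocknr, uint32_t fs_gen)
{
        if (memcmp(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE,
                   strlen(OCFS2_EXTENT_BLOCK_SIGNATURE)) != 0) {
                fprintf(stderr, "bad signature %.8s\n", eb->h_signature);
                return -1;
        }
        if (eb->h_blkno != blocknr) {
                fprintf(stderr, "block %llu claims to be %llu\n",
                        (unsigned long long)blocknr,
                        (unsigned long long)eb->h_blkno);
                return -1;
        }
        if (eb->h_fs_generation != fs_gen) {
                fprintf(stderr, "stale generation %u\n",
                        eb->h_fs_generation);
                return -1;
        }
        return 0;
}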
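
The net/ceph/osd_client.c entries raise the epoch barrier with an unlocked fast-path test followed by a recheck under the lock, so the barrier only ever moves forward and the common no-op case takes no lock. A pthread sketch of the same double-checked update; the kernel version uses a semaphore (down_write) rather than a mutex.

#include <pthread.h>
#include <stdint.h>

/* Double-checked, forward-only barrier update in the shape of
 * ceph_osdc_update_epoch_barrier(). */
struct osdc_sketch {
        pthread_mutex_t lock;
        uint32_t epoch_barrier;
};

static void update_epoch_barrier(struct osdc_sketch *osdc, uint32_t eb)
{
        if (eb <= osdc->epoch_barrier)  /* racy fast path, usually stale */
                return;

        pthread_mutex_lock(&osdc->lock);
        if (eb > osdc->epoch_barrier)   /* recheck under the lock */
                osdc->epoch_barrier = eb;
        pthread_mutex_unlock(&osdc->lock);
}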
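
Finally, the tools/lib/traceevent comparators at the end of the listing sort arrays of event pointers, so each qsort callback dereferences twice and compares a primary key with a tie-breaker. A runnable sketch of the system-then-name variant; struct tep_event_sketch abbreviates the real tep_event.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tep_event_sketch {
        const char *system;
        const char *name;
};

/* The array holds pointers to events, hence the double deref. */
static int events_system_cmp(const void *a, const void *b)
{
        struct tep_event_sketch *const *ea = a;
        struct tep_event_sketch *const *eb = b;
        int res = strcmp((*ea)->system, (*eb)->system);

        if (res)
                return res;
        return strcmp((*ea)->name, (*eb)->name);  /* tie-breaker */
}

int main(void)
{
        struct tep_event_sketch e1 = { "sched", "sched_switch" };
        struct tep_event_sketch e2 = { "block", "block_rq_issue" };
        struct tep_event_sketch *events[] = { &e1, &e2 };

        qsort(events, 2, sizeof(events[0]), events_system_cmp);
        printf("%s/%s first\n", events[0]->system, events[0]->name);
        return 0;
}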