wb 1974 arch/alpha/kernel/smc37c669.c wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
wb 1975 arch/alpha/kernel/smc37c669.c wb( &SMC37c669->index_port, SMC37c669_CONFIG_ON_KEY );
wb 1979 arch/alpha/kernel/smc37c669.c wb( &SMC37c669->index_port, SMC37c669_CONFIG_OFF_KEY );
wb 2009 arch/alpha/kernel/smc37c669.c wb(&SMC37c669->index_port, index);
wb 2043 arch/alpha/kernel/smc37c669.c wb( &SMC37c669->index_port, index );
wb 2044 arch/alpha/kernel/smc37c669.c wb( &SMC37c669->data_port, data );
wb 892 arch/mips/include/asm/sn/sn0/hubio.h wb: 1, /* 15: writeback pending. */
wb 78 arch/s390/include/asm/pci_clp.h u64 wb;
wb 171 arch/s390/pci/pci_clp.c zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
wb 304 arch/xtensa/kernel/ptrace.c unsigned long wb = regs->windowbase;
wb 306 arch/xtensa/kernel/ptrace.c tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
wb 57 arch/xtensa/kernel/signal.c const unsigned long wb = regs->windowbase;
wb 70 arch/xtensa/kernel/signal.c wm = (ws >> wb) | (ws << (XCHAL_NUM_AREGS / 4 - wb));
wb 117 arch/xtensa/kernel/signal.c regs->windowstart = 1 << wb;
wb 99 block/blk-wbt.c struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
wb 101 block/blk-wbt.c return time_before(jiffies, wb->dirty_sleep + HZ);
wb 258 drivers/char/ps3flash.c int wb;
wb 265 drivers/char/ps3flash.c wb = ps3flash_writeback(ps3flash_dev);
wb 266 drivers/char/ps3flash.c if (wb)
wb 267 drivers/char/ps3flash.c return wb;
wb 187 drivers/crypto/sunxi-ss/sun4i-ss-hash.c __le32 wb = 0;
wb 399 drivers/crypto/sunxi-ss/sun4i-ss-hash.c wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
wb 400 drivers/crypto/sunxi-ss/sun4i-ss-hash.c wb &= GENMASK((nbw * 8) - 1, 0);
wb 407 drivers/crypto/sunxi-ss/sun4i-ss-hash.c wb |= ((1 << 7) << (nbw * 8));
wb 408 drivers/crypto/sunxi-ss/sun4i-ss-hash.c bf[j++] = le32_to_cpu(wb);
wb 509 drivers/gpu/drm/amd/amdgpu/amdgpu.h volatile uint32_t *wb;
wb 515 drivers/gpu/drm/amd/amdgpu/amdgpu.h int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
wb 516 drivers/gpu/drm/amd/amdgpu/amdgpu.h void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
wb 877 drivers/gpu/drm/amd/amdgpu/amdgpu.h struct amdgpu_wb wb;
wb 664 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c if (adev->wb.wb_obj) {
wb 665 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c amdgpu_bo_free_kernel(&adev->wb.wb_obj,
wb 666 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c &adev->wb.gpu_addr,
wb 667 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (void **)&adev->wb.wb);
wb 668 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c adev->wb.wb_obj = NULL;
wb 685 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c if (adev->wb.wb_obj == NULL) {
wb 689 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c &adev->wb.wb_obj, &adev->wb.gpu_addr,
wb 690 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c (void **)&adev->wb.wb);
wb 696 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c adev->wb.num_wb = AMDGPU_MAX_WB;
wb 697 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c memset(&adev->wb.used, 0, sizeof(adev->wb.used));
wb 700 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
wb 715 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
wb 717 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
wb 719 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c if (offset < adev->wb.num_wb) {
wb 720 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c __set_bit(offset, adev->wb.used);
wb 721 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c *wb = offset << 3; /* convert to dw offset */
wb 736 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
wb 738 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c wb >>= 3;
wb 739 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c if (wb < adev->wb.num_wb)
wb 740 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c __clear_bit(wb, adev->wb.used);
wb 398 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
wb 399 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
wb 98 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c ih->wptr_addr = adev->wb.gpu_addr + wptr_offs * 4;
wb 99 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c ih->wptr_cpu = &adev->wb.wb[wptr_offs];
wb 100 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c ih->rptr_addr = adev->wb.gpu_addr + rptr_offs * 4;
wb 101 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c ih->rptr_cpu = &adev->wb.wb[rptr_offs];
wb 292 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c adev->wb.gpu_addr + (ring->trail_fence_offs * 4);
wb 293 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c ring->trail_fence_cpu_addr = &adev->wb.wb[ring->trail_fence_offs];
wb 300 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
wb 301 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
wb 242 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
wb 51 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c if (adev->wb.wb_obj)
wb 87 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c return adev->wb.wb[adev->virt.reg_val_offs];
wb 167 drivers/gpu/drm/amd/amdgpu/cik_sdma.c rptr = ring->adev->wb.wb[ring->rptr_offs];
wb 478 drivers/gpu/drm/amd/amdgpu/cik_sdma.c upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
wb 480 drivers/gpu/drm/amd/amdgpu/cik_sdma.c ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
wb 626 drivers/gpu/drm/amd/amdgpu/cik_sdma.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 628 drivers/gpu/drm/amd/amdgpu/cik_sdma.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 642 drivers/gpu/drm/amd/amdgpu/cik_sdma.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 678 drivers/gpu/drm/amd/amdgpu/cik_sdma.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 680 drivers/gpu/drm/amd/amdgpu/cik_sdma.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 704 drivers/gpu/drm/amd/amdgpu/cik_sdma.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 275 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 2845 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 2850 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 2881 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 2885 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 3063 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 3069 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 3342 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 3348 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 3558 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
wb 4360 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 is 32bit rptr*/
wb 4370 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
wb 4385 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
wb 4395 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c return ring->adev->wb.wb[ring->rptr_offs]; /* gfx10 hardware is 32bit rptr */
wb 4404 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
wb 4416 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
wb 4800 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
wb 4802 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
wb 2124 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 2146 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c return ring->adev->wb.wb[ring->rptr_offs];
wb 2210 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 2229 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 2635 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 2660 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c return ring->adev->wb.wb[ring->rptr_offs];
wb 2681 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c return ring->adev->wb.wb[ring->wptr_offs];
wb 2689 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 2989 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 2994 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 888 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 889 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
wb 914 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c tmp = adev->wb.wb[index];
wb 4322 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 4326 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 4413 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 4524 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 4530 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 6043 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c return ring->adev->wb.wb[ring->rptr_offs];
wb 6052 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c return ring->adev->wb.wb[ring->wptr_offs];
wb 6063 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 6263 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c return ring->adev->wb.wb[ring->wptr_offs];
wb 6271 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 6495 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
wb 6497 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
wb 891 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 892 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
wb 917 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c tmp = adev->wb.wb[index];
wb 3228 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 3232 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 3382 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 3503 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 3509 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 3761 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
wb 4949 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr*/
wb 4959 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
wb 4974 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
wb 5139 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
wb 5148 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
wb 5259 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
wb 5415 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
wb 5417 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
wb 197 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c return ring->adev->wb.wb[ring->rptr_offs] >> 2;
wb 456 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
wb 458 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
wb 560 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 562 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 577 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 613 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 615 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 643 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 353 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c return ring->adev->wb.wb[ring->rptr_offs] >> 2;
wb 370 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
wb 390 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
wb 392 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
wb 395 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
wb 397 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
wb 695 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
wb 697 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
wb 716 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 832 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 834 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 849 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 885 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 887 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 915 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 546 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
wb 566 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
wb 592 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
wb 602 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c WRITE_ONCE(*wb, (ring->wptr << 2));
wb 635 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
wb 657 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
wb 660 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c WRITE_ONCE(*wb, (ring->wptr << 2));
wb 1006 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
wb 1008 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
wb 1038 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 1096 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
wb 1098 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
wb 1129 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 1385 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 1387 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 1402 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 1438 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 1440 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 1468 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 273 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);
wb 294 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
wb 336 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2);
wb 337 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);
wb 658 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
wb 673 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
wb 675 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
wb 900 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 902 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 920 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 968 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 970 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 1002 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 43 drivers/gpu/drm/amd/amdgpu/si_dma.c return ring->adev->wb.wb[ring->rptr_offs>>2];
wb 156 drivers/gpu/drm/amd/amdgpu/si_dma.c rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
wb 215 drivers/gpu/drm/amd/amdgpu/si_dma.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 217 drivers/gpu/drm/amd/amdgpu/si_dma.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 230 drivers/gpu/drm/amd/amdgpu/si_dma.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 266 drivers/gpu/drm/amd/amdgpu/si_dma.c gpu_addr = adev->wb.gpu_addr + (index * 4);
wb 268 drivers/gpu/drm/amd/amdgpu/si_dma.c adev->wb.wb[index] = cpu_to_le32(tmp);
wb 290 drivers/gpu/drm/amd/amdgpu/si_dma.c tmp = le32_to_cpu(adev->wb.wb[index]);
wb 121 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c return adev->wb.wb[ring->wptr_offs];
wb 156 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 739 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
wb 85 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c return adev->wb.wb[ring->wptr_offs];
wb 108 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 179 drivers/gpu/drm/amd/amdgpu/vce_v4_0.c adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0;
wb 1455 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c return adev->wb.wb[ring->wptr_offs];
wb 1476 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 1679 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c return adev->wb.wb[ring->wptr_offs];
wb 1684 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c return adev->wb.wb[ring->wptr_offs];
wb 1703 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 1710 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 1819 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c return adev->wb.wb[ring->wptr_offs];
wb 1836 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 976 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c return adev->wb.wb[ring->wptr_offs];
wb 993 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 1060 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c return adev->wb.wb[ring->wptr_offs];
wb 1065 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c return adev->wb.wb[ring->wptr_offs];
wb 1084 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 1091 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 1155 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c return adev->wb.wb[ring->wptr_offs];
wb 1172 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
wb 1849 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
wb 1850 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
wb 1851 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
wb 1852 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
wb 1853 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
wb 1854 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
wb 1855 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
wb 1856 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
wb 1857 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
wb 1858 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
wb 1861 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
wb 1863 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
wb 1865 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
wb 280 drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h struct writeback_st wb;
wb 462 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_src_height;
wb 464 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_src_width;
wb 466 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_dst_width;
wb 468 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_dst_height;
wb 470 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_hratio;
wb 472 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_vratio;
wb 474 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c (enum source_format_class) (dout->wb.wb_pixel_format);
wb 476 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_htaps_luma;
wb 478 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_vtaps_luma;
wb 480 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_htaps_luma;
wb 482 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_vtaps_luma;
wb 484 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_htaps_chroma;
wb 486 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_vtaps_chroma;
wb 488 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_hratio;
wb 490 drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c dout->wb.wb_vratio;
wb 158 drivers/gpu/drm/lima/lima_pp.c static void lima_pp_write_frame(struct lima_ip *ip, u32 *frame, u32 *wb)
wb 167 drivers/gpu/drm/lima/lima_pp.c writel(wb[n++], ip->iomem + LIMA_PP_WB(i) + j * 4);
wb 326 drivers/gpu/drm/lima/lima_pp.c lima_pp_write_frame(ip, frame->frame, frame->wb);
wb 353 drivers/gpu/drm/lima/lima_pp.c lima_pp_write_frame(ip, frame->frame, frame->wb);
wb 1395 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h case 0: return (mdp5_cfg->wb.base[0]);
wb 1396 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h case 1: return (mdp5_cfg->wb.base[1]);
wb 1397 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h case 2: return (mdp5_cfg->wb.base[2]);
wb 1398 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h case 3: return (mdp5_cfg->wb.base[3]);
wb 1399 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h case 4: return (mdp5_cfg->wb.base[4]);
wb 3753 drivers/gpu/drm/radeon/cik.c } else if (rdev->wb.enabled) {
wb 4078 drivers/gpu/drm/radeon/cik.c WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
wb 4096 drivers/gpu/drm/radeon/cik.c WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
wb 4097 drivers/gpu/drm/radeon/cik.c WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
wb 4102 drivers/gpu/drm/radeon/cik.c if (!rdev->wb.enabled)
wb 4132 drivers/gpu/drm/radeon/cik.c if (rdev->wb.enabled)
wb 4133 drivers/gpu/drm/radeon/cik.c rptr = rdev->wb.wb[ring->rptr_offs/4];
wb 4158 drivers/gpu/drm/radeon/cik.c if (rdev->wb.enabled) {
wb 4159 drivers/gpu/drm/radeon/cik.c rptr = rdev->wb.wb[ring->rptr_offs/4];
wb 4176 drivers/gpu/drm/radeon/cik.c if (rdev->wb.enabled) {
wb 4178 drivers/gpu/drm/radeon/cik.c wptr = rdev->wb.wb[ring->wptr_offs/4];
wb 4194 drivers/gpu/drm/radeon/cik.c rdev->wb.wb[ring->wptr_offs/4] = ring->wptr;
wb 4691 drivers/gpu/drm/radeon/cik.c wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
wb 4693 drivers/gpu/drm/radeon/cik.c wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
wb 4702 drivers/gpu/drm/radeon/cik.c wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
wb 4704 drivers/gpu/drm/radeon/cik.c wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
wb 6990 drivers/gpu/drm/radeon/cik.c if (rdev->wb.enabled)
wb 6994 drivers/gpu/drm/radeon/cik.c WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
wb 6995 drivers/gpu/drm/radeon/cik.c WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
wb 7500 drivers/gpu/drm/radeon/cik.c if (rdev->wb.enabled)
wb 7501 drivers/gpu/drm/radeon/cik.c wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
wb 68 drivers/gpu/drm/radeon/cik_sdma.c if (rdev->wb.enabled) {
wb 69 drivers/gpu/drm/radeon/cik_sdma.c rptr = rdev->wb.wb[ring->rptr_offs/4];
wb 139 drivers/gpu/drm/radeon/cik_sdma.c if (rdev->wb.enabled) {
wb 401 drivers/gpu/drm/radeon/cik_sdma.c upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
wb 403 drivers/gpu/drm/radeon/cik_sdma.c ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
wb 405 drivers/gpu/drm/radeon/cik_sdma.c if (rdev->wb.enabled)
wb 659 drivers/gpu/drm/radeon/cik_sdma.c gpu_addr = rdev->wb.gpu_addr + index;
wb 662 drivers/gpu/drm/radeon/cik_sdma.c rdev->wb.wb[index/4] = cpu_to_le32(tmp);
wb 677 drivers/gpu/drm/radeon/cik_sdma.c tmp = le32_to_cpu(rdev->wb.wb[index/4]);
wb 716 drivers/gpu/drm/radeon/cik_sdma.c gpu_addr = rdev->wb.gpu_addr + index;
wb 719 drivers/gpu/drm/radeon/cik_sdma.c rdev->wb.wb[index/4] = cpu_to_le32(tmp);
wb 751 drivers/gpu/drm/radeon/cik_sdma.c tmp = le32_to_cpu(rdev->wb.wb[index/4]);
wb 2945 drivers/gpu/drm/radeon/evergreen.c } else if (rdev->wb.enabled) {
wb 3103 drivers/gpu/drm/radeon/evergreen.c ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
wb 3104 drivers/gpu/drm/radeon/evergreen.c WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
wb 3105 drivers/gpu/drm/radeon/evergreen.c WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
wb 3107 drivers/gpu/drm/radeon/evergreen.c if (rdev->wb.enabled)
wb 4680 drivers/gpu/drm/radeon/evergreen.c if (rdev->wb.enabled)
wb 4681 drivers/gpu/drm/radeon/evergreen.c wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
wb 72 drivers/gpu/drm/radeon/evergreen_dma.c if (rdev->wb.enabled) {
wb 1479 drivers/gpu/drm/radeon/ni.c if (rdev->wb.enabled)
wb 1480 drivers/gpu/drm/radeon/ni.c rptr = rdev->wb.wb[ring->rptr_offs/4];
wb 1685 drivers/gpu/drm/radeon/ni.c WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
wb 1702 drivers/gpu/drm/radeon/ni.c addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
wb 58 drivers/gpu/drm/radeon/ni_dma.c if (rdev->wb.enabled) {
wb 59 drivers/gpu/drm/radeon/ni_dma.c rptr = rdev->wb.wb[ring->rptr_offs/4];
wb 128 drivers/gpu/drm/radeon/ni_dma.c if (rdev->wb.enabled) {
wb 223 drivers/gpu/drm/radeon/ni_dma.c upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
wb 225 drivers/gpu/drm/radeon/ni_dma.c ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
wb 227 drivers/gpu/drm/radeon/ni_dma.c if (rdev->wb.enabled)
wb 1067 drivers/gpu/drm/radeon/r100.c if (rdev->wb.enabled)
wb 1068 drivers/gpu/drm/radeon/r100.c rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
wb 1188 drivers/gpu/drm/radeon/r100.c S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
wb 1189 drivers/gpu/drm/radeon/r100.c WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
wb 1191 drivers/gpu/drm/radeon/r100.c if (rdev->wb.enabled)
wb 2622 drivers/gpu/drm/radeon/r600.c if (rdev->wb.enabled)
wb 2623 drivers/gpu/drm/radeon/r600.c rptr = rdev->wb.wb[ring->rptr_offs/4];
wb 2748 drivers/gpu/drm/radeon/r600.c ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
wb 2749 drivers/gpu/drm/radeon/r600.c WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
wb 2750 drivers/gpu/drm/radeon/r600.c WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
wb 2752 drivers/gpu/drm/radeon/r600.c if (rdev->wb.enabled)
wb 2876 drivers/gpu/drm/radeon/r600.c if (rdev->wb.use_event) {
wb 3380 drivers/gpu/drm/radeon/r600.c } else if (rdev->wb.enabled) {
wb 3717 drivers/gpu/drm/radeon/r600.c if (rdev->wb.enabled)
wb 3721 drivers/gpu/drm/radeon/r600.c WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
wb 3722 drivers/gpu/drm/radeon/r600.c WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
wb 4044 drivers/gpu/drm/radeon/r600.c if (rdev->wb.enabled)
wb 4045 drivers/gpu/drm/radeon/r600.c wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
wb 56 drivers/gpu/drm/radeon/r600_dma.c if (rdev->wb.enabled)
wb 57 drivers/gpu/drm/radeon/r600_dma.c rptr = rdev->wb.wb[ring->rptr_offs/4];
wb 144 drivers/gpu/drm/radeon/r600_dma.c upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
wb 146 drivers/gpu/drm/radeon/r600_dma.c ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
wb 148 drivers/gpu/drm/radeon/r600_dma.c if (rdev->wb.enabled)
wb 244 drivers/gpu/drm/radeon/r600_dma.c gpu_addr = rdev->wb.gpu_addr + index;
wb 247 drivers/gpu/drm/radeon/r600_dma.c rdev->wb.wb[index/4] = cpu_to_le32(tmp);
wb 261 drivers/gpu/drm/radeon/r600_dma.c tmp = le32_to_cpu(rdev->wb.wb[index/4]);
wb 351 drivers/gpu/drm/radeon/r600_dma.c gpu_addr = rdev->wb.gpu_addr + index;
wb 382 drivers/gpu/drm/radeon/r600_dma.c tmp = le32_to_cpu(rdev->wb.wb[index/4]);
wb 409 drivers/gpu/drm/radeon/r600_dma.c if (rdev->wb.enabled) {
wb 1129 drivers/gpu/drm/radeon/radeon.h volatile uint32_t *wb;
wb 2386 drivers/gpu/drm/radeon/radeon.h struct radeon_wb wb;
wb 423 drivers/gpu/drm/radeon/radeon_device.c rdev->wb.enabled = false;
wb 437 drivers/gpu/drm/radeon/radeon_device.c if (rdev->wb.wb_obj) {
wb 438 drivers/gpu/drm/radeon/radeon_device.c if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
wb 439 drivers/gpu/drm/radeon/radeon_device.c radeon_bo_kunmap(rdev->wb.wb_obj);
wb 440 drivers/gpu/drm/radeon/radeon_device.c radeon_bo_unpin(rdev->wb.wb_obj);
wb 441 drivers/gpu/drm/radeon/radeon_device.c radeon_bo_unreserve(rdev->wb.wb_obj);
wb 443 drivers/gpu/drm/radeon/radeon_device.c radeon_bo_unref(&rdev->wb.wb_obj);
wb 444 drivers/gpu/drm/radeon/radeon_device.c rdev->wb.wb = NULL;
wb 445 drivers/gpu/drm/radeon/radeon_device.c rdev->wb.wb_obj = NULL;
wb 462 drivers/gpu/drm/radeon/radeon_device.c if (rdev->wb.wb_obj == NULL) {
wb 465 drivers/gpu/drm/radeon/radeon_device.c &rdev->wb.wb_obj);
wb 470 drivers/gpu/drm/radeon/radeon_device.c r = radeon_bo_reserve(rdev->wb.wb_obj, false);
wb 475 drivers/gpu/drm/radeon/radeon_device.c r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
wb 476 drivers/gpu/drm/radeon/radeon_device.c &rdev->wb.gpu_addr);
wb 478 drivers/gpu/drm/radeon/radeon_device.c radeon_bo_unreserve(rdev->wb.wb_obj);
wb 483 drivers/gpu/drm/radeon/radeon_device.c r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
wb 484 drivers/gpu/drm/radeon/radeon_device.c radeon_bo_unreserve(rdev->wb.wb_obj);
wb 493 drivers/gpu/drm/radeon/radeon_device.c memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
wb 495 drivers/gpu/drm/radeon/radeon_device.c rdev->wb.use_event = false;
wb 498 drivers/gpu/drm/radeon/radeon_device.c rdev->wb.enabled = false;
wb 502 drivers/gpu/drm/radeon/radeon_device.c rdev->wb.enabled = false;
wb 505 drivers/gpu/drm/radeon/radeon_device.c rdev->wb.enabled = false;
wb 507 drivers/gpu/drm/radeon/radeon_device.c rdev->wb.enabled = true;
wb 510 drivers/gpu/drm/radeon/radeon_device.c rdev->wb.use_event = true;
wb 516 drivers/gpu/drm/radeon/radeon_device.c rdev->wb.enabled = true;
wb 517 drivers/gpu/drm/radeon/radeon_device.c rdev->wb.use_event = true;
wb 520 drivers/gpu/drm/radeon/radeon_device.c dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
wb 71 drivers/gpu/drm/radeon/radeon_fence.c if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
wb 94 drivers/gpu/drm/radeon/radeon_fence.c if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
wb 839 drivers/gpu/drm/radeon/radeon_fence.c if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
wb 843 drivers/gpu/drm/radeon/radeon_fence.c rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
wb 844 drivers/gpu/drm/radeon/radeon_fence.c rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
wb 863 drivers/gpu/drm/radeon/radeon_fence.c rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
wb 864 drivers/gpu/drm/radeon/radeon_fence.c rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
wb 304 drivers/gpu/drm/radeon/radeon_ring.c else if (rdev->wb.enabled)
wb 416 drivers/gpu/drm/radeon/radeon_ring.c if (rdev->wb.enabled) {
wb 418 drivers/gpu/drm/radeon/radeon_ring.c ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
wb 419 drivers/gpu/drm/radeon/radeon_ring.c ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
wb 3424 drivers/gpu/drm/radeon/si.c } else if (rdev->wb.enabled) {
wb 3664 drivers/gpu/drm/radeon/si.c WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
wb 3682 drivers/gpu/drm/radeon/si.c WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
wb 3683 drivers/gpu/drm/radeon/si.c WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
wb 3685 drivers/gpu/drm/radeon/si.c if (rdev->wb.enabled)
wb 3713 drivers/gpu/drm/radeon/si.c WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
wb 3714 drivers/gpu/drm/radeon/si.c WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
wb 3737 drivers/gpu/drm/radeon/si.c WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
wb 3738 drivers/gpu/drm/radeon/si.c WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
wb 6018 drivers/gpu/drm/radeon/si.c if (rdev->wb.enabled)
wb 6022 drivers/gpu/drm/radeon/si.c WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
wb 6023 drivers/gpu/drm/radeon/si.c WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
wb 6214 drivers/gpu/drm/radeon/si.c if (rdev->wb.enabled)
wb 6215 drivers/gpu/drm/radeon/si.c wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
wb 2206 drivers/hid/hid-wiimote-modules.c __u8 sx, sy, tb, wb, bd, bm, bp, bo, br, bb, bg, by, bu;
wb 2242 drivers/hid/hid-wiimote-modules.c wb = ext[3] & 0x1f;
wb 2262 drivers/hid/hid-wiimote-modules.c input_report_abs(wdata->extension.input, ABS_HAT1X, wb - 0x10);
wb 168 drivers/md/bcache/sysfs.c int wb = dc->writeback_running;
wb 196 drivers/md/bcache/sysfs.c wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
wb 219 drivers/md/bcache/sysfs.c wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
wb 224 drivers/md/bcache/sysfs.c wb ? dc->writeback_rate_proportional << 9 : 0);
wb 226 drivers/md/bcache/sysfs.c wb ? dc->writeback_rate_integral_scaled << 9 : 0);
wb 227 drivers/md/bcache/sysfs.c bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
wb 228 drivers/md/bcache/sysfs.c next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(),
wb 1332 drivers/md/dm-writecache.c struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
wb 1333 drivers/md/dm-writecache.c struct dm_writecache *wc = wb->wc;
wb 1339 drivers/md/dm-writecache.c list_add_tail(&wb->endio_entry, &wc->endio_list);
wb 1360 drivers/md/dm-writecache.c struct writeback_struct *wb;
wb 1365 drivers/md/dm-writecache.c wb = list_entry(list->next, struct writeback_struct, endio_entry);
wb 1366 drivers/md/dm-writecache.c list_del(&wb->endio_entry);
wb 1368 drivers/md/dm-writecache.c if (unlikely(wb->bio.bi_status != BLK_STS_OK))
wb 1369 drivers/md/dm-writecache.c writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
wb 1370 drivers/md/dm-writecache.c "write error %d", wb->bio.bi_status);
wb 1373 drivers/md/dm-writecache.c e = wb->wc_list[i];
wb 1388 drivers/md/dm-writecache.c } while (++i < wb->wc_list_n);
wb 1390 drivers/md/dm-writecache.c if (wb->wc_list != wb->wc_list_inline)
wb 1391 drivers/md/dm-writecache.c kfree(wb->wc_list);
wb 1392 drivers/md/dm-writecache.c bio_put(&wb->bio);
wb 1472 drivers/md/dm-writecache.c static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
wb 1474 drivers/md/dm-writecache.c struct dm_writecache *wc = wb->wc;
wb 1479 drivers/md/dm-writecache.c return bio_add_page(&wb->bio, persistent_memory_page(address),
wb 1505 drivers/md/dm-writecache.c struct writeback_struct *wb;
wb 1516 drivers/md/dm-writecache.c wb = container_of(bio, struct writeback_struct, bio);
wb 1517 drivers/md/dm-writecache.c wb->wc = wc;
wb 1522 drivers/md/dm-writecache.c unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
wb 1525 drivers/md/dm-writecache.c wb->wc_list = wb->wc_list_inline;
wb 1529 drivers/md/dm-writecache.c BUG_ON(!wc_add_block(wb, e, GFP_NOIO));
wb 1531 drivers/md/dm-writecache.c wb->wc_list[0] = e;
wb 1532 drivers/md/dm-writecache.c wb->wc_list_n = 1;
wb 1534 drivers/md/dm-writecache.c while (wbl->size && wb->wc_list_n < max_pages) {
wb 1539 drivers/md/dm-writecache.c if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
wb 1543 drivers/md/dm-writecache.c wb->wc_list[wb->wc_list_n++] = f;
wb 1814 drivers/md/dm.c r = bdi->wb.congested->state & bdi_bits;
wb 53 drivers/media/dvb-frontends/dib3000mb.c u8 wb[] = { ((reg >> 8) | 0x80) & 0xff, reg & 0xff };
wb 56 drivers/media/dvb-frontends/dib3000mb.c { .addr = state->config.demod_address, .flags = 0, .buf = wb, .len = 2 },
wb 278 drivers/media/i2c/m5mols/m5mols_controls.c static const unsigned short wb[][2] = {
wb 293 drivers/media/i2c/m5mols/m5mols_controls.c for (i = 0; i < ARRAY_SIZE(wb); i++) {
wb 295 drivers/media/i2c/m5mols/m5mols_controls.c if (wb[i][0] != val)
wb 299 drivers/media/i2c/m5mols/m5mols_controls.c "Setting white balance to: %#x\n", wb[i][0]);
wb 301 drivers/media/i2c/m5mols/m5mols_controls.c awb = wb[i][0] == V4L2_WHITE_BALANCE_AUTO;
wb 308 drivers/media/i2c/m5mols/m5mols_controls.c ret = m5mols_write(sd, AWB_MANUAL, wb[i][1]);
wb 144 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c static const unsigned short wb[][2] = {
wb 154 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c for (i = 0; i < ARRAY_SIZE(wb); i++) {
wb 155 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c if (wb[i][0] != val)
wb 162 drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c return s5c73m3_isp_command(state, COMM_AWB_MODE, wb[i][1]);
wb 237 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c struct venc_h264_vpu_buf *wb = inst->vsi->work_bufs;
wb 258 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c inst->work_bufs[i].size = wb[i].size;
wb 261 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c inst->vpu_inst.dev, wb[i].vpua);
wb 281 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c wb[i].vpua);
wb 283 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c wb[i].size);
wb 286 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c wb[i].iova = inst->work_bufs[i].dma_addr;
wb 161 drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c struct venc_vp8_vpu_buf *wb = inst->vsi->work_bufs;
wb 166 drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c if (wb[i].size == 0)
wb 178 drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c inst->work_bufs[i].size = wb[i].size;
wb 196 drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c wb[i].vpua);
wb 197 drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c memcpy(inst->work_bufs[i].va, tmp_va, wb[i].size);
wb 199 drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c wb[i].iova = inst->work_bufs[i].dma_addr;
wb 2228 drivers/media/usb/dvb-usb/dib0700_devices.c u8 wb[4] = { 0xc >> 8, 0xc & 0xff, 0, 0 };
wb 2231 drivers/media/usb/dvb-usb/dib0700_devices.c {.addr = 0x1e >> 1, .flags = 0, .buf = wb, .len = 2},
wb 2257 drivers/media/usb/dvb-usb/dib0700_devices.c wb[2] = (data[index_data + 1] >> 8) & 0xff;
wb 2258 drivers/media/usb/dvb-usb/dib0700_devices.c wb[3] = (data[index_data + 1]) & 0xff;
wb 2261 drivers/media/usb/dvb-usb/dib0700_devices.c wb[0] = (data[index_data] >> 8) & 0xff;
wb 2262 drivers/media/usb/dvb-usb/dib0700_devices.c wb[1] = (data[index_data]) & 0xff;
wb 2266 drivers/media/usb/dvb-usb/dib0700_devices.c wb[2] |= rb[0];
wb 2267 drivers/media/usb/dvb-usb/dib0700_devices.c wb[3] |= rb[1] & ~(3 << 4);
wb 2270 drivers/media/usb/dvb-usb/dib0700_devices.c wb[0] = (data[index_data] >> 8)&0xff;
wb 2271 drivers/media/usb/dvb-usb/dib0700_devices.c wb[1] = (data[index_data])&0xff;
wb 802 drivers/mtd/nand/raw/nandsim.c struct weak_block *wb;
wb 821 drivers/mtd/nand/raw/nandsim.c wb = kzalloc(sizeof(*wb), GFP_KERNEL);
wb 822 drivers/mtd/nand/raw/nandsim.c if (!wb) {
wb 826 drivers/mtd/nand/raw/nandsim.c wb->erase_block_no = erase_block_no;
wb 827 drivers/mtd/nand/raw/nandsim.c wb->max_erases = max_erases;
wb 828 drivers/mtd/nand/raw/nandsim.c list_add(&wb->list, &weak_blocks);
wb 835 drivers/mtd/nand/raw/nandsim.c struct weak_block *wb;
wb 837 drivers/mtd/nand/raw/nandsim.c list_for_each_entry(wb, &weak_blocks, list)
wb 838 drivers/mtd/nand/raw/nandsim.c if (wb->erase_block_no == erase_block_no) {
wb 839 drivers/mtd/nand/raw/nandsim.c if (wb->erases_done >= wb->max_erases)
wb 841 drivers/mtd/nand/raw/nandsim.c wb->erases_done += 1;
wb 70 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h u8 wb)
wb 76 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h else if (wb && CHIP_IS_E1(bp))
wb 85 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h u32 len, u8 wb)
wb 96 drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
wb 314 drivers/net/ethernet/freescale/enetc/enetc.c tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
wb 369 drivers/net/ethernet/freescale/enetc/enetc_hw.h } wb; /* writeback descriptor */
wb 528 drivers/net/ethernet/intel/e1000/e1000_hw.h } wb; /* writeback */
wb 559 drivers/net/ethernet/intel/e1000/e1000_hw.h } wb; /* writeback */
wb 225 drivers/net/ethernet/intel/e1000e/hw.h } wb; /* writeback */
wb 261 drivers/net/ethernet/intel/e1000e/hw.h } wb; /* writeback */
wb 366 drivers/net/ethernet/intel/e1000e/netdev.c le32_to_cpu(rx_desc_ps->wb.middle.status_error);
wb 432 drivers/net/ethernet/intel/e1000e/netdev.c staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
wb 924 drivers/net/ethernet/intel/e1000e/netdev.c staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
wb 954 drivers/net/ethernet/intel/e1000e/netdev.c length = le16_to_cpu(rx_desc->wb.upper.length);
wb 1023 drivers/net/ethernet/intel/e1000e/netdev.c e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
wb 1026 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc->wb.upper.vlan);
wb 1029 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
wb 1042 drivers/net/ethernet/intel/e1000e/netdev.c staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
wb 1320 drivers/net/ethernet/intel/e1000e/netdev.c staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
wb 1365 drivers/net/ethernet/intel/e1000e/netdev.c length = le16_to_cpu(rx_desc->wb.middle.length0);
wb 1380 drivers/net/ethernet/intel/e1000e/netdev.c int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
wb 1421 drivers/net/ethernet/intel/e1000e/netdev.c length = le16_to_cpu(rx_desc->wb.upper.length[j]);
wb 1450 drivers/net/ethernet/intel/e1000e/netdev.c e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
wb 1452 drivers/net/ethernet/intel/e1000e/netdev.c if (rx_desc->wb.upper.header_status &
wb 1457 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc->wb.middle.vlan);
wb 1460 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
wb 1474 drivers/net/ethernet/intel/e1000e/netdev.c staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
wb 1523 drivers/net/ethernet/intel/e1000e/netdev.c staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
wb 1551 drivers/net/ethernet/intel/e1000e/netdev.c length = le16_to_cpu(rx_desc->wb.upper.length);
wb 1626 drivers/net/ethernet/intel/e1000e/netdev.c e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
wb 1640 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc->wb.upper.vlan);
wb 1643 drivers/net/ethernet/intel/e1000e/netdev.c rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
wb 1656 drivers/net/ethernet/intel/e1000e/netdev.c staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
wb 539 drivers/net/ethernet/intel/i40e/i40e_txrx.c qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
wb 544 drivers/net/ethernet/intel/i40e/i40e_txrx.c pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
wb 545 drivers/net/ethernet/intel/i40e/i40e_txrx.c if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
wb 563 drivers/net/ethernet/intel/i40e/i40e_txrx.c if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
wb 592 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_desc->wb.qword0.hi_dword.fd_id);
wb 1606 drivers/net/ethernet/intel/i40e/i40e_txrx.c rx_desc->wb.qword1.status_error_len = 0;
wb 1642 drivers/net/ethernet/intel/i40e/i40e_txrx.c qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
wb 1760 drivers/net/ethernet/intel/i40e/i40e_txrx.c if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
wb 1761 drivers/net/ethernet/intel/i40e/i40e_txrx.c hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
wb 1780 drivers/net/ethernet/intel/i40e/i40e_txrx.c u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
wb 1799 drivers/net/ethernet/intel/i40e/i40e_txrx.c u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
wb 2360 drivers/net/ethernet/intel/i40e/i40e_txrx.c qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
wb 190 drivers/net/ethernet/intel/i40e/i40e_txrx.h return !!(rx_desc->wb.qword1.status_error_len &
wb 679 drivers/net/ethernet/intel/i40e/i40e_type.h } wb; /* writeback */
wb 728 drivers/net/ethernet/intel/i40e/i40e_type.h } wb; /* writeback */
wb 347 drivers/net/ethernet/intel/i40e/i40e_xsk.c rx_desc->wb.qword1.status_error_len = 0;
wb 555 drivers/net/ethernet/intel/i40e/i40e_xsk.c qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
wb 917 drivers/net/ethernet/intel/iavf/iavf_txrx.c rx_desc->wb.qword1.status_error_len = 0;
wb 953 drivers/net/ethernet/intel/iavf/iavf_txrx.c qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
wb 1064 drivers/net/ethernet/intel/iavf/iavf_txrx.c if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
wb 1065 drivers/net/ethernet/intel/iavf/iavf_txrx.c hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
wb 1507 drivers/net/ethernet/intel/iavf/iavf_txrx.c qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
wb 1565 drivers/net/ethernet/intel/iavf/iavf_txrx.c qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
wb 1574 drivers/net/ethernet/intel/iavf/iavf_txrx.c le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
wb 170 drivers/net/ethernet/intel/iavf/iavf_txrx.h return !!(rx_desc->wb.qword1.status_error_len &
wb 228 drivers/net/ethernet/intel/iavf/iavf_type.h } wb; /* writeback */
wb 277 drivers/net/ethernet/intel/iavf/iavf_type.h } wb; /* writeback */
wb 621 drivers/net/ethernet/intel/ice/ice_ethtool.c if (!(rx_desc->wb.status_error0 &
wb 40 drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h } wb; /* writeback */
wb 147 drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h } wb; /* writeback */
wb 507 drivers/net/ethernet/intel/ice/ice_txrx.c rx_desc->wb.status_error0 = 0;
wb 806 drivers/net/ethernet/intel/ice/ice_txrx.c return !!(rx_desc->wb.status_error0 &
wb 873 drivers/net/ethernet/intel/ice/ice_txrx.c if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
wb 898 drivers/net/ethernet/intel/ice/ice_txrx.c rx_status = le16_to_cpu(rx_desc->wb.status_error0);
wb 1039 drivers/net/ethernet/intel/ice/ice_txrx.c size = le16_to_cpu(rx_desc->wb.pkt_len) &
wb 1073 drivers/net/ethernet/intel/ice/ice_txrx.c vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
wb 1087 drivers/net/ethernet/intel/ice/ice_txrx.c rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
wb 87 drivers/net/ethernet/intel/igb/e1000_82575.h } wb; /* writeback */
wb 106 drivers/net/ethernet/intel/igb/e1000_82575.h } wb;
wb 371 drivers/net/ethernet/intel/igb/igb.h return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
wb 1815 drivers/net/ethernet/intel/igb/igb_ethtool.c while (rx_desc->wb.upper.length) {
wb 517 drivers/net/ethernet/intel/igb/igb_main.c staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
wb 4527 drivers/net/ethernet/intel/igb/igb_main.c rx_desc->wb.upper.length = 0;
wb 7773 drivers/net/ethernet/intel/igb/igb_main.c if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
wb 7877 drivers/net/ethernet/intel/igb/igb_main.c tx_buffer->next_to_watch->wb.status);
wb 8145 drivers/net/ethernet/intel/igb/igb_main.c le32_to_cpu(rx_desc->wb.upper.status_error));
wb 8154 drivers/net/ethernet/intel/igb/igb_main.c le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
wb 8250 drivers/net/ethernet/intel/igb/igb_main.c vid = be16_to_cpu(rx_desc->wb.upper.vlan);
wb 8252 drivers/net/ethernet/intel/igb/igb_main.c vid = le16_to_cpu(rx_desc->wb.upper.vlan);
wb 8322 drivers/net/ethernet/intel/igb/igb_main.c size = le16_to_cpu(rx_desc->wb.upper.length);
wb 8485 drivers/net/ethernet/intel/igb/igb_main.c rx_desc->wb.upper.length = 0;
wb 259 drivers/net/ethernet/intel/igbvf/netdev.c staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
wb 274 drivers/net/ethernet/intel/igbvf/netdev.c hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info)
wb 280 drivers/net/ethernet/intel/igbvf/netdev.c length = le16_to_cpu(rx_desc->wb.upper.length);
wb 354 drivers/net/ethernet/intel/igbvf/netdev.c rx_desc->wb.upper.vlan);
wb 357 drivers/net/ethernet/intel/igbvf/netdev.c rx_desc->wb.upper.status_error = 0;
wb 369 drivers/net/ethernet/intel/igbvf/netdev.c staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
wb 793 drivers/net/ethernet/intel/igbvf/netdev.c if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
wb 817 drivers/net/ethernet/intel/igbvf/netdev.c tx_desc->wb.status = 0;
wb 64 drivers/net/ethernet/intel/igbvf/vf.h } wb; /* writeback */
wb 81 drivers/net/ethernet/intel/igbvf/vf.h } wb;
wb 145 drivers/net/ethernet/intel/igc/igc.h return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
wb 22 drivers/net/ethernet/intel/igc/igc_base.h } wb;
wb 76 drivers/net/ethernet/intel/igc/igc_base.h } wb; /* writeback */
wb 568 drivers/net/ethernet/intel/igc/igc_main.c rx_desc->wb.upper.length = 0;
wb 1172 drivers/net/ethernet/intel/igc/igc_main.c le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
wb 1533 drivers/net/ethernet/intel/igc/igc_main.c rx_desc->wb.upper.length = 0;
wb 1576 drivers/net/ethernet/intel/igc/igc_main.c size = le16_to_cpu(rx_desc->wb.upper.length);
wb 1729 drivers/net/ethernet/intel/igc/igc_main.c if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
wb 1833 drivers/net/ethernet/intel/igc/igc_main.c tx_buffer->next_to_watch->wb.status);
wb 504 drivers/net/ethernet/intel/ixgbe/ixgbe.h return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
wb 1905 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
wb 1927 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c while (rx_desc->wb.upper.length) {
wb 436 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
wb 450 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
wb 1156 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
wb 798 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (rx_desc->wb.upper.length) {
wb 1140 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
wb 1431 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
wb 1437 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
wb 1453 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
wb 1472 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
wb 1621 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_desc->wb.upper.length = 0;
wb 1699 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
wb 1747 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
wb 1757 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
wb 2306 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c size = le16_to_cpu(rx_desc->wb.upper.length);
wb 4153 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c rx_desc->wb.upper.length = 0;
wb 2850 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h } wb;
wb 2881 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h } wb; /* writeback */
wb 356 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c rx_desc->wb.upper.length = 0;
wb 455 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c size = le16_to_cpu(rx_desc->wb.upper.length);
wb 654 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
wb 200 drivers/net/ethernet/intel/ixgbevf/defines.h } wb;
wb 231 drivers/net/ethernet/intel/ixgbevf/defines.h } wb; /* writeback */
wb 543 drivers/net/ethernet/intel/ixgbevf/ipsec.c __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
wb 283 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
wb 294 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
wb 390 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c eop_desc, (eop_desc ? eop_desc->wb.status : 0),
wb 452 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
wb 458 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
wb 516 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
wb 692 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_desc->wb.upper.length = 0;
wb 1140 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c size = le16_to_cpu(rx_desc->wb.upper.length);
wb 1936 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c rx_desc->wb.upper.length = 0;
wb 247 drivers/staging/wilc1000/wilc_spi.c static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen)
wb 256 drivers/staging/wilc1000/wilc_spi.c .tx_buf = wb,
wb 287 drivers/staging/wilc1000/wilc_spi.c u8 wb[32], rb[32];
wb 296 drivers/staging/wilc1000/wilc_spi.c wb[0] = cmd;
wb 299 drivers/staging/wilc1000/wilc_spi.c wb[1] = (u8)(adr >> 16);
wb 300 drivers/staging/wilc1000/wilc_spi.c wb[2] = (u8)(adr >> 8);
wb 301 drivers/staging/wilc1000/wilc_spi.c wb[3] = (u8)adr;
wb 306 drivers/staging/wilc1000/wilc_spi.c wb[1] = (u8)(adr >> 8);
wb 308 drivers/staging/wilc1000/wilc_spi.c wb[1] |= BIT(7);
wb 309 drivers/staging/wilc1000/wilc_spi.c wb[2] = (u8)adr;
wb 310 drivers/staging/wilc1000/wilc_spi.c wb[3] = 0x00;
wb 315 drivers/staging/wilc1000/wilc_spi.c wb[1] = 0x00;
wb 316 drivers/staging/wilc1000/wilc_spi.c wb[2] = 0x00;
wb 317 drivers/staging/wilc1000/wilc_spi.c wb[3] = 0x00;
wb 322 drivers/staging/wilc1000/wilc_spi.c wb[1] = 0x00;
wb 323 drivers/staging/wilc1000/wilc_spi.c wb[2] = 0x00;
wb 324 drivers/staging/wilc1000/wilc_spi.c wb[3] = 0x00;
wb 329 drivers/staging/wilc1000/wilc_spi.c wb[1] = 0xff;
wb 330 drivers/staging/wilc1000/wilc_spi.c wb[2] = 0xff;
wb 331 drivers/staging/wilc1000/wilc_spi.c wb[3] = 0xff;
wb 337 drivers/staging/wilc1000/wilc_spi.c wb[1] = (u8)(adr >> 16);
wb 338 drivers/staging/wilc1000/wilc_spi.c wb[2] = (u8)(adr >> 8);
wb 339 drivers/staging/wilc1000/wilc_spi.c wb[3] = (u8)adr;
wb 340 drivers/staging/wilc1000/wilc_spi.c wb[4] = (u8)(sz >> 8);
wb 341 drivers/staging/wilc1000/wilc_spi.c wb[5] = (u8)(sz);
wb 347 drivers/staging/wilc1000/wilc_spi.c wb[1] = (u8)(adr >> 16);
wb 348 drivers/staging/wilc1000/wilc_spi.c wb[2] = (u8)(adr >> 8);
wb 349 drivers/staging/wilc1000/wilc_spi.c wb[3] = (u8)adr;
wb 350 drivers/staging/wilc1000/wilc_spi.c wb[4] = (u8)(sz >> 16);
wb 351 drivers/staging/wilc1000/wilc_spi.c wb[5] = (u8)(sz >> 8);
wb 352 drivers/staging/wilc1000/wilc_spi.c wb[6] = (u8)(sz);
wb 357 drivers/staging/wilc1000/wilc_spi.c wb[1] = (u8)(adr >> 8);
wb 359 drivers/staging/wilc1000/wilc_spi.c wb[1] |= BIT(7);
wb 360 drivers/staging/wilc1000/wilc_spi.c wb[2] = (u8)(adr);
wb 361 drivers/staging/wilc1000/wilc_spi.c wb[3] = b[3];
wb 362 drivers/staging/wilc1000/wilc_spi.c wb[4] = b[2];
wb 363 drivers/staging/wilc1000/wilc_spi.c wb[5] = b[1];
wb 364 drivers/staging/wilc1000/wilc_spi.c wb[6] = b[0];
wb 369 drivers/staging/wilc1000/wilc_spi.c wb[1] = (u8)(adr >> 16);
wb 370 drivers/staging/wilc1000/wilc_spi.c wb[2] = (u8)(adr >> 8);
wb 371 drivers/staging/wilc1000/wilc_spi.c wb[3] = (u8)(adr);
wb 372 drivers/staging/wilc1000/wilc_spi.c wb[4] = b[3];
wb 373 drivers/staging/wilc1000/wilc_spi.c wb[5] = b[2];
wb 374 drivers/staging/wilc1000/wilc_spi.c wb[6] = b[1];
wb 375 drivers/staging/wilc1000/wilc_spi.c wb[7] = b[0];
wb 388 drivers/staging/wilc1000/wilc_spi.c wb[len - 1] = (crc7(0x7f, (const u8 *)&wb[0], len - 1)) << 1;
wb 414 drivers/staging/wilc1000/wilc_spi.c if (len2 > ARRAY_SIZE(wb)) { wb 416 drivers/staging/wilc1000/wilc_spi.c len2, ARRAY_SIZE(wb)); wb 421 drivers/staging/wilc1000/wilc_spi.c wb[wix] = 0; wb 424 drivers/staging/wilc1000/wilc_spi.c if (wilc_spi_tx_rx(wilc, wb, rb, len2)) { wb 156 drivers/usb/class/cdc-acm.c usb_kill_urb(acm->wb[i].urb); wb 169 drivers/usb/class/cdc-acm.c struct acm_wb *wb; wb 174 drivers/usb/class/cdc-acm.c wb = &acm->wb[wbn]; wb 175 drivers/usb/class/cdc-acm.c if (!wb->use) { wb 176 drivers/usb/class/cdc-acm.c wb->use = 1; wb 177 drivers/usb/class/cdc-acm.c wb->len = 0; wb 194 drivers/usb/class/cdc-acm.c n -= acm->wb[i].use; wb 202 drivers/usb/class/cdc-acm.c static void acm_write_done(struct acm *acm, struct acm_wb *wb) wb 204 drivers/usb/class/cdc-acm.c wb->use = 0; wb 215 drivers/usb/class/cdc-acm.c static int acm_start_wb(struct acm *acm, struct acm_wb *wb) wb 221 drivers/usb/class/cdc-acm.c wb->urb->transfer_buffer = wb->buf; wb 222 drivers/usb/class/cdc-acm.c wb->urb->transfer_dma = wb->dmah; wb 223 drivers/usb/class/cdc-acm.c wb->urb->transfer_buffer_length = wb->len; wb 224 drivers/usb/class/cdc-acm.c wb->urb->dev = acm->dev; wb 226 drivers/usb/class/cdc-acm.c rc = usb_submit_urb(wb->urb, GFP_ATOMIC); wb 231 drivers/usb/class/cdc-acm.c acm_write_done(acm, wb); wb 552 drivers/usb/class/cdc-acm.c struct acm_wb *wb = urb->context; wb 553 drivers/usb/class/cdc-acm.c struct acm *acm = wb->instance; wb 564 drivers/usb/class/cdc-acm.c acm_write_done(acm, wb); wb 728 drivers/usb/class/cdc-acm.c struct acm_wb *wb; wb 745 drivers/usb/class/cdc-acm.c wb = urb->context; wb 746 drivers/usb/class/cdc-acm.c wb->use = 0; wb 781 drivers/usb/class/cdc-acm.c struct acm_wb *wb; wb 794 drivers/usb/class/cdc-acm.c wb = &acm->wb[wbn]; wb 797 drivers/usb/class/cdc-acm.c wb->use = 0; wb 804 drivers/usb/class/cdc-acm.c memcpy(wb->buf, buf, count); wb 805 drivers/usb/class/cdc-acm.c wb->len = count; wb 809 drivers/usb/class/cdc-acm.c wb->use = 0; wb 815 drivers/usb/class/cdc-acm.c usb_anchor_urb(wb->urb, &acm->delayed); wb 820 drivers/usb/class/cdc-acm.c stat = acm_start_wb(acm, wb); wb 1110 drivers/usb/class/cdc-acm.c struct acm_wb *wb; wb 1112 drivers/usb/class/cdc-acm.c for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) wb 1113 drivers/usb/class/cdc-acm.c usb_free_coherent(acm->dev, acm->writesize, wb->buf, wb->dmah); wb 1129 drivers/usb/class/cdc-acm.c struct acm_wb *wb; wb 1131 drivers/usb/class/cdc-acm.c for (wb = &acm->wb[0], i = 0; i < ACM_NW; i++, wb++) { wb 1132 drivers/usb/class/cdc-acm.c wb->buf = usb_alloc_coherent(acm->dev, acm->writesize, GFP_KERNEL, wb 1133 drivers/usb/class/cdc-acm.c &wb->dmah); wb 1134 drivers/usb/class/cdc-acm.c if (!wb->buf) { wb 1137 drivers/usb/class/cdc-acm.c --wb; wb 1139 drivers/usb/class/cdc-acm.c wb->buf, wb->dmah); wb 1419 drivers/usb/class/cdc-acm.c struct acm_wb *snd = &(acm->wb[i]); wb 1520 drivers/usb/class/cdc-acm.c usb_free_urb(acm->wb[i].urb); wb 1574 drivers/usb/class/cdc-acm.c usb_free_urb(acm->wb[i].urb); wb 95 drivers/usb/class/cdc-acm.h struct acm_wb wb[ACM_NW]; wb 100 fs/afs/file.c af->wb = wbk; wb 106 fs/afs/file.c af->wb = p; wb 177 fs/afs/file.c if (af->wb) wb 178 fs/afs/file.c afs_put_wb_key(af->wb); wb 211 fs/afs/internal.h struct afs_wb_key *wb; /* Writeback key record for this file */ wb 71 fs/f2fs/node.c if (sbi->sb->s_bdi->wb.dirty_exceeded) wb 94 fs/f2fs/node.c if (!sbi->sb->s_bdi->wb.dirty_exceeded) wb 818 fs/f2fs/segment.h if (sbi->sb->s_bdi->wb.dirty_exceeded) wb 86 fs/fs-writeback.c static bool 
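
Note: the cdc-acm hits above all orbit one pattern: acm_wb_alloc() scans acm->wb[] for an entry whose use flag is clear, claims it, and acm_write_done() releases it when the URB completes. Below is a minimal userspace sketch of that claim/release cycle; the pool size, type names and helper names are illustrative stand-ins, not the driver's definitions, and the real driver serializes the scan with a spinlock.

    #include <stdio.h>
    #include <string.h>

    #define NW 16                     /* pool size; ACM_NW plays this role in the driver */

    struct wbuf {
        int use;                      /* claimed flag, like acm_wb.use */
        size_t len;
        unsigned char buf[64];
    };

    static struct wbuf pool[NW];

    /* Claim the first free buffer, as acm_wb_alloc() does by scanning acm->wb[]. */
    static struct wbuf *wb_alloc(void)
    {
        for (int i = 0; i < NW; i++) {
            if (!pool[i].use) {
                pool[i].use = 1;
                pool[i].len = 0;
                return &pool[i];
            }
        }
        return NULL;                  /* all buffers in flight */
    }

    /* Completion path: release the buffer, as acm_write_done() clears wb->use. */
    static void wb_done(struct wbuf *w)
    {
        w->use = 0;
    }

    int main(void)
    {
        struct wbuf *w = wb_alloc();
        if (w) {
            memcpy(w->buf, "hi", 2);
            w->len = 2;
            /* ... submit for I/O, then on completion: */
            wb_done(w);
        }
        printf("buffer 0 in flight: %d\n", pool[0].use);
        return 0;
    }
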
wb 86 fs/fs-writeback.c static bool wb_io_lists_populated(struct bdi_writeback *wb)
wb 88 fs/fs-writeback.c if (wb_has_dirty_io(wb)) {
wb 91 fs/fs-writeback.c set_bit(WB_has_dirty_io, &wb->state);
wb 92 fs/fs-writeback.c WARN_ON_ONCE(!wb->avg_write_bandwidth);
wb 93 fs/fs-writeback.c atomic_long_add(wb->avg_write_bandwidth,
wb 94 fs/fs-writeback.c &wb->bdi->tot_write_bandwidth);
wb 99 fs/fs-writeback.c static void wb_io_lists_depopulated(struct bdi_writeback *wb)
wb 101 fs/fs-writeback.c if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
wb 102 fs/fs-writeback.c list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
wb 103 fs/fs-writeback.c clear_bit(WB_has_dirty_io, &wb->state);
wb 104 fs/fs-writeback.c WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
wb 105 fs/fs-writeback.c &wb->bdi->tot_write_bandwidth) < 0);
wb 120 fs/fs-writeback.c struct bdi_writeback *wb,
wb 123 fs/fs-writeback.c assert_spin_locked(&wb->list_lock);
wb 128 fs/fs-writeback.c if (head != &wb->b_dirty_time)
wb 129 fs/fs-writeback.c return wb_io_lists_populated(wb);
wb 131 fs/fs-writeback.c wb_io_lists_depopulated(wb);
wb 144 fs/fs-writeback.c struct bdi_writeback *wb)
wb 146 fs/fs-writeback.c assert_spin_locked(&wb->list_lock);
wb 149 fs/fs-writeback.c wb_io_lists_depopulated(wb);
wb 152 fs/fs-writeback.c static void wb_wakeup(struct bdi_writeback *wb)
wb 154 fs/fs-writeback.c spin_lock_bh(&wb->work_lock);
wb 155 fs/fs-writeback.c if (test_bit(WB_registered, &wb->state))
wb 156 fs/fs-writeback.c mod_delayed_work(bdi_wq, &wb->dwork, 0);
wb 157 fs/fs-writeback.c spin_unlock_bh(&wb->work_lock);
wb 160 fs/fs-writeback.c static void finish_writeback_work(struct bdi_writeback *wb,
wb 176 fs/fs-writeback.c static void wb_queue_work(struct bdi_writeback *wb,
wb 179 fs/fs-writeback.c trace_writeback_queue(wb, work);
wb 184 fs/fs-writeback.c spin_lock_bh(&wb->work_lock);
wb 186 fs/fs-writeback.c if (test_bit(WB_registered, &wb->state)) {
wb 187 fs/fs-writeback.c list_add_tail(&work->list, &wb->work_list);
wb 188 fs/fs-writeback.c mod_delayed_work(bdi_wq, &wb->dwork, 0);
wb 190 fs/fs-writeback.c finish_writeback_work(wb, work);
wb 192 fs/fs-writeback.c spin_unlock_bh(&wb->work_lock);
wb 252 fs/fs-writeback.c struct bdi_writeback *wb = NULL;
wb 259 fs/fs-writeback.c wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
wb 263 fs/fs-writeback.c wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
wb 268 fs/fs-writeback.c if (!wb)
wb 269 fs/fs-writeback.c wb = &bdi->wb;
wb 275 fs/fs-writeback.c if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
wb 276 fs/fs-writeback.c wb_put(wb);
wb 291 fs/fs-writeback.c __acquires(&wb->list_lock)
wb 294 fs/fs-writeback.c struct bdi_writeback *wb = inode_to_wb(inode);
wb 302 fs/fs-writeback.c wb_get(wb);
wb 304 fs/fs-writeback.c spin_lock(&wb->list_lock);
wb 307 fs/fs-writeback.c if (likely(wb == inode->i_wb)) {
wb 308 fs/fs-writeback.c wb_put(wb); /* @inode already has ref */
wb 309 fs/fs-writeback.c return wb;
wb 312 fs/fs-writeback.c spin_unlock(&wb->list_lock);
wb 313 fs/fs-writeback.c wb_put(wb);
wb 327 fs/fs-writeback.c __acquires(&wb->list_lock)
wb 565 fs/fs-writeback.c wbc->wb = inode_to_wb(inode);
wb 568 fs/fs-writeback.c wbc->wb_id = wbc->wb->memcg_css->id;
wb 575 fs/fs-writeback.c wb_get(wbc->wb);
wb 585 fs/fs-writeback.c if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
wb 629 fs/fs-writeback.c struct bdi_writeback *wb = wbc->wb;
wb 635 fs/fs-writeback.c if (!wb)
wb 662 fs/fs-writeback.c wb->avg_write_bandwidth);
wb 708 fs/fs-writeback.c wb_put(wbc->wb);
wb 709 fs/fs-writeback.c wbc->wb = NULL;
wb 735 fs/fs-writeback.c if (!wbc->wb || wbc->no_cgroup_owner)
wb 786 fs/fs-writeback.c struct bdi_writeback *wb;
wb 790 fs/fs-writeback.c wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
wb 791 fs/fs-writeback.c congested = wb_congested(wb, cong_bits);
wb 796 fs/fs-writeback.c return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
wb 809 fs/fs-writeback.c static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
wb 811 fs/fs-writeback.c unsigned long this_bw = wb->avg_write_bandwidth;
wb 812 fs/fs-writeback.c unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
wb 844 fs/fs-writeback.c struct bdi_writeback *wb = list_entry(&bdi->wb_list,
wb 850 fs/fs-writeback.c list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
wb 862 fs/fs-writeback.c if (!wb_has_dirty_io(wb) &&
wb 864 fs/fs-writeback.c list_empty(&wb->b_dirty_time)))
wb 866 fs/fs-writeback.c if (skip_if_busy && writeback_in_progress(wb))
wb 869 fs/fs-writeback.c nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
wb 876 fs/fs-writeback.c wb_queue_work(wb, work);
wb 887 fs/fs-writeback.c wb_queue_work(wb, work);
wb 894 fs/fs-writeback.c wb_get(wb);
wb 895 fs/fs-writeback.c last_wb = wb;
wb 923 fs/fs-writeback.c struct bdi_writeback *wb;
wb 946 fs/fs-writeback.c wb = wb_get_lookup(bdi, memcg_css);
wb 947 fs/fs-writeback.c if (!wb) {
wb 962 fs/fs-writeback.c mem_cgroup_wb_stats(wb, &filepages, &headroom, &dirty,
wb 976 fs/fs-writeback.c wb_queue_work(wb, work);
wb 982 fs/fs-writeback.c wb_put(wb);
wb 1029 fs/fs-writeback.c __acquires(&wb->list_lock)
wb 1031 fs/fs-writeback.c struct bdi_writeback *wb = inode_to_wb(inode);
wb 1034 fs/fs-writeback.c spin_lock(&wb->list_lock);
wb 1035 fs/fs-writeback.c return wb;
wb 1039 fs/fs-writeback.c __acquires(&wb->list_lock)
wb 1041 fs/fs-writeback.c struct bdi_writeback *wb = inode_to_wb(inode);
wb 1043 fs/fs-writeback.c spin_lock(&wb->list_lock);
wb 1044 fs/fs-writeback.c return wb;
wb 1047 fs/fs-writeback.c static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
wb 1058 fs/fs-writeback.c if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
wb 1060 fs/fs-writeback.c wb_queue_work(&bdi->wb, base_work);
wb 1077 fs/fs-writeback.c static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
wb 1079 fs/fs-writeback.c if (!wb_has_dirty_io(wb))
wb 1090 fs/fs-writeback.c if (test_bit(WB_start_all, &wb->state) ||
wb 1091 fs/fs-writeback.c test_and_set_bit(WB_start_all, &wb->state))
wb 1094 fs/fs-writeback.c wb->start_all_reason = reason;
wb 1095 fs/fs-writeback.c wb_wakeup(wb);
wb 1108 fs/fs-writeback.c void wb_start_background_writeback(struct bdi_writeback *wb)
wb 1114 fs/fs-writeback.c trace_writeback_wake_background(wb);
wb 1115 fs/fs-writeback.c wb_wakeup(wb);
wb 1123 fs/fs-writeback.c struct bdi_writeback *wb;
wb 1125 fs/fs-writeback.c wb = inode_to_wb_and_lock_list(inode);
wb 1126 fs/fs-writeback.c inode_io_list_del_locked(inode, wb);
wb 1127 fs/fs-writeback.c spin_unlock(&wb->list_lock);
wb 1175 fs/fs-writeback.c static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
wb 1177 fs/fs-writeback.c if (!list_empty(&wb->b_dirty)) {
wb 1180 fs/fs-writeback.c tail = wb_inode(wb->b_dirty.next);
wb 1184 fs/fs-writeback.c inode_io_list_move_locked(inode, wb, &wb->b_dirty);
wb 1190 fs/fs-writeback.c static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
wb 1192 fs/fs-writeback.c inode_io_list_move_locked(inode, wb, &wb->b_more_io);
wb 1292 fs/fs-writeback.c static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
wb 1296 fs/fs-writeback.c assert_spin_locked(&wb->list_lock);
wb 1297 fs/fs-writeback.c list_splice_init(&wb->b_more_io, &wb->b_io);
wb 1298 fs/fs-writeback.c moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
wb 1299 fs/fs-writeback.c moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
wb 1302 fs/fs-writeback.c wb_io_lists_populated(wb);
wb 1303 fs/fs-writeback.c trace_writeback_queue_io(wb, work, moved);
wb 1377 fs/fs-writeback.c static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
wb 1397 fs/fs-writeback.c redirty_tail(inode, wb);
wb 1408 fs/fs-writeback.c requeue_io(inode, wb);
wb 1417 fs/fs-writeback.c redirty_tail(inode, wb);
wb 1425 fs/fs-writeback.c redirty_tail(inode, wb);
wb 1428 fs/fs-writeback.c inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
wb 1431 fs/fs-writeback.c inode_io_list_del_locked(inode, wb);
wb 1530 fs/fs-writeback.c struct bdi_writeback *wb;
wb 1569 fs/fs-writeback.c wb = inode_to_wb_and_lock_list(inode);
wb 1576 fs/fs-writeback.c inode_io_list_del_locked(inode, wb);
wb 1577 fs/fs-writeback.c spin_unlock(&wb->list_lock);
wb 1584 fs/fs-writeback.c static long writeback_chunk_size(struct bdi_writeback *wb,
wb 1605 fs/fs-writeback.c pages = min(wb->avg_write_bandwidth / 2,
wb 1625 fs/fs-writeback.c struct bdi_writeback *wb,
wb 1642 fs/fs-writeback.c while (!list_empty(&wb->b_io)) {
wb 1643 fs/fs-writeback.c struct inode *inode = wb_inode(wb->b_io.prev);
wb 1653 fs/fs-writeback.c redirty_tail(inode, wb);
wb 1673 fs/fs-writeback.c redirty_tail(inode, wb);
wb 1687 fs/fs-writeback.c requeue_io(inode, wb);
wb 1691 fs/fs-writeback.c spin_unlock(&wb->list_lock);
wb 1702 fs/fs-writeback.c spin_lock(&wb->list_lock);
wb 1708 fs/fs-writeback.c write_chunk = writeback_chunk_size(wb, work);
wb 1747 fs/fs-writeback.c if (unlikely(tmp_wb != wb)) {
wb 1749 fs/fs-writeback.c spin_lock(&wb->list_lock);
wb 1766 fs/fs-writeback.c static long __writeback_inodes_wb(struct bdi_writeback *wb,
wb 1772 fs/fs-writeback.c while (!list_empty(&wb->b_io)) {
wb 1773 fs/fs-writeback.c struct inode *inode = wb_inode(wb->b_io.prev);
wb 1782 fs/fs-writeback.c redirty_tail(inode, wb);
wb 1785 fs/fs-writeback.c wrote += writeback_sb_inodes(sb, wb, work);
wb 1800 fs/fs-writeback.c static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
wb 1812 fs/fs-writeback.c spin_lock(&wb->list_lock);
wb 1813 fs/fs-writeback.c if (list_empty(&wb->b_io))
wb 1814 fs/fs-writeback.c queue_io(wb, &work);
wb 1815 fs/fs-writeback.c __writeback_inodes_wb(wb, &work);
wb 1816 fs/fs-writeback.c spin_unlock(&wb->list_lock);
wb 1837 fs/fs-writeback.c static long wb_writeback(struct bdi_writeback *wb,
wb 1851 fs/fs-writeback.c spin_lock(&wb->list_lock);
wb 1866 fs/fs-writeback.c !list_empty(&wb->work_list))
wb 1873 fs/fs-writeback.c if (work->for_background && !wb_over_bg_thresh(wb))
wb 1888 fs/fs-writeback.c trace_writeback_start(wb, work);
wb 1889 fs/fs-writeback.c if (list_empty(&wb->b_io))
wb 1890 fs/fs-writeback.c queue_io(wb, work);
wb 1892 fs/fs-writeback.c progress = writeback_sb_inodes(work->sb, wb, work);
wb 1894 fs/fs-writeback.c progress = __writeback_inodes_wb(wb, work);
wb 1895 fs/fs-writeback.c trace_writeback_written(wb, work);
wb 1897 fs/fs-writeback.c wb_update_bandwidth(wb, wb_start);
wb 1912 fs/fs-writeback.c if (list_empty(&wb->b_more_io))
wb 1919 fs/fs-writeback.c trace_writeback_wait(wb, work);
wb 1920 fs/fs-writeback.c inode = wb_inode(wb->b_more_io.prev);
wb 1922 fs/fs-writeback.c spin_unlock(&wb->list_lock);
wb 1925 fs/fs-writeback.c spin_lock(&wb->list_lock);
wb 1927 fs/fs-writeback.c spin_unlock(&wb->list_lock);
wb 1936 fs/fs-writeback.c static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
wb 1940 fs/fs-writeback.c spin_lock_bh(&wb->work_lock);
wb 1941 fs/fs-writeback.c if (!list_empty(&wb->work_list)) {
wb 1942 fs/fs-writeback.c work = list_entry(wb->work_list.next,
wb 1946 fs/fs-writeback.c spin_unlock_bh(&wb->work_lock);
wb 1950 fs/fs-writeback.c static long wb_check_background_flush(struct bdi_writeback *wb)
wb 1952 fs/fs-writeback.c if (wb_over_bg_thresh(wb)) {
wb 1962 fs/fs-writeback.c return wb_writeback(wb, &work);
wb 1968 fs/fs-writeback.c static long wb_check_old_data_flush(struct bdi_writeback *wb)
wb 1979 fs/fs-writeback.c expired = wb->last_old_flush +
wb 1984 fs/fs-writeback.c wb->last_old_flush = jiffies;
wb 1996 fs/fs-writeback.c return wb_writeback(wb, &work);
wb 2002 fs/fs-writeback.c static long wb_check_start_all(struct bdi_writeback *wb)
wb 2006 fs/fs-writeback.c if (!test_bit(WB_start_all, &wb->state))
wb 2012 fs/fs-writeback.c .nr_pages = wb_split_bdi_pages(wb, nr_pages),
wb 2015 fs/fs-writeback.c .reason = wb->start_all_reason,
wb 2018 fs/fs-writeback.c nr_pages = wb_writeback(wb, &work);
wb 2021 fs/fs-writeback.c clear_bit(WB_start_all, &wb->state);
wb 2029 fs/fs-writeback.c static long wb_do_writeback(struct bdi_writeback *wb)
wb 2034 fs/fs-writeback.c set_bit(WB_writeback_running, &wb->state);
wb 2035 fs/fs-writeback.c while ((work = get_next_work_item(wb)) != NULL) {
wb 2036 fs/fs-writeback.c trace_writeback_exec(wb, work);
wb 2037 fs/fs-writeback.c wrote += wb_writeback(wb, work);
wb 2038 fs/fs-writeback.c finish_writeback_work(wb, work);
wb 2044 fs/fs-writeback.c wrote += wb_check_start_all(wb);
wb 2049 fs/fs-writeback.c wrote += wb_check_old_data_flush(wb);
wb 2050 fs/fs-writeback.c wrote += wb_check_background_flush(wb);
wb 2051 fs/fs-writeback.c clear_bit(WB_writeback_running, &wb->state);
wb 2062 fs/fs-writeback.c struct bdi_writeback *wb = container_of(to_delayed_work(work),
wb 2066 fs/fs-writeback.c set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
wb 2070 fs/fs-writeback.c !test_bit(WB_registered, &wb->state))) {
wb 2078 fs/fs-writeback.c pages_written = wb_do_writeback(wb);
wb 2080 fs/fs-writeback.c } while (!list_empty(&wb->work_list));
wb 2087 fs/fs-writeback.c pages_written = writeback_inodes_wb(wb, 1024,
wb 2092 fs/fs-writeback.c if (!list_empty(&wb->work_list))
wb 2093 fs/fs-writeback.c wb_wakeup(wb);
wb 2094 fs/fs-writeback.c else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
wb 2095 fs/fs-writeback.c wb_wakeup_delayed(wb);
wb 2107 fs/fs-writeback.c struct bdi_writeback *wb;
wb 2112 fs/fs-writeback.c list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
wb 2113 fs/fs-writeback.c wb_start_writeback(wb, reason);
wb 2167 fs/fs-writeback.c struct bdi_writeback *wb;
wb 2169 fs/fs-writeback.c list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
wb 2170 fs/fs-writeback.c if (!list_empty(&wb->b_dirty_time))
wb 2171 fs/fs-writeback.c wb_wakeup(wb);
wb 2315 fs/fs-writeback.c struct bdi_writeback *wb;
wb 2319 fs/fs-writeback.c wb = locked_inode_to_wb_and_lock_list(inode);
wb 2321 fs/fs-writeback.c WARN(bdi_cap_writeback_dirty(wb->bdi) &&
wb 2322 fs/fs-writeback.c !test_bit(WB_registered, &wb->state),
wb 2323 fs/fs-writeback.c "bdi-%s not registered\n", wb->bdi->name);
wb 2330 fs/fs-writeback.c dirty_list = &wb->b_dirty;
wb 2332 fs/fs-writeback.c dirty_list = &wb->b_dirty_time;
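
Note: the fs/fs-writeback.c hits above trace the flusher's main loop: wb_do_writeback() pops items off wb->work_list under wb->work_lock via get_next_work_item(), runs each through wb_writeback(), then falls back to the start-all, old-data and background checks. A simplified, runnable model of that drain loop follows; the list, lock and work-item fields are stand-ins for the kernel's, and the locking is elided.

    #include <stdio.h>
    #include <stdlib.h>

    struct work { long nr_pages; struct work *next; };
    static struct work *work_list;

    /* Pop one queued item; the kernel does this under wb->work_lock. */
    static struct work *get_next_work_item(void)
    {
        struct work *w = work_list;
        if (w)
            work_list = w->next;
        return w;
    }

    static long wb_writeback_model(struct work *w)
    {
        printf("writing back %ld pages\n", w->nr_pages);
        return w->nr_pages;
    }

    static long wb_do_writeback_model(void)
    {
        long wrote = 0;
        struct work *w;

        while ((w = get_next_work_item()) != NULL) {   /* drain queued work */
            wrote += wb_writeback_model(w);
            free(w);
        }
        /* the kernel then checks start-all, old-data and background flush */
        return wrote;
    }

    int main(void)
    {
        struct work *w = malloc(sizeof(*w));
        w->nr_pages = 1024;
        w->next = NULL;
        work_list = w;
        printf("total: %ld\n", wb_do_writeback_model());
        return 0;
    }
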
wb 2334 fs/fs-writeback.c wakeup_bdi = inode_io_list_move_locked(inode, wb,
wb 2337 fs/fs-writeback.c spin_unlock(&wb->list_lock);
wb 2346 fs/fs-writeback.c if (bdi_cap_writeback_dirty(wb->bdi) && wakeup_bdi)
wb 2347 fs/fs-writeback.c wb_wakeup_delayed(wb);
wb 1619 fs/fuse/file.c dec_wb_stat(&bdi->wb, WB_WRITEBACK);
wb 1621 fs/fuse/file.c wb_writeout_inc(&bdi->wb);
wb 1854 fs/fuse/file.c inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
wb 2001 fs/fuse/file.c dec_wb_stat(&bdi->wb, WB_WRITEBACK);
wb 2003 fs/fuse/file.c wb_writeout_inc(&bdi->wb);
wb 2100 fs/fuse/file.c inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
wb 514 fs/gfs2/super.c if (bdi->wb.dirty_exceeded)
wb 664 fs/nfs/internal.h inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE);
wb 960 fs/nfs/write.c dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
wb 210 include/linux/backing-dev-defs.h struct bdi_writeback wb; /* the root writeback info for this bdi */
wb 243 include/linux/backing-dev-defs.h clear_wb_congested(bdi->wb.congested, sync);
wb 248 include/linux/backing-dev-defs.h set_wb_congested(bdi->wb.congested, sync);
wb 262 include/linux/backing-dev-defs.h static inline bool wb_tryget(struct bdi_writeback *wb)
wb 264 include/linux/backing-dev-defs.h if (wb != &wb->bdi->wb)
wb 265 include/linux/backing-dev-defs.h return percpu_ref_tryget(&wb->refcnt);
wb 273 include/linux/backing-dev-defs.h static inline void wb_get(struct bdi_writeback *wb)
wb 275 include/linux/backing-dev-defs.h if (wb != &wb->bdi->wb)
wb 276 include/linux/backing-dev-defs.h percpu_ref_get(&wb->refcnt);
wb 283 include/linux/backing-dev-defs.h static inline void wb_put(struct bdi_writeback *wb)
wb 285 include/linux/backing-dev-defs.h if (WARN_ON_ONCE(!wb->bdi)) {
wb 293 include/linux/backing-dev-defs.h if (wb != &wb->bdi->wb)
wb 294 include/linux/backing-dev-defs.h percpu_ref_put(&wb->refcnt);
wb 303 include/linux/backing-dev-defs.h static inline bool wb_dying(struct bdi_writeback *wb)
wb 305 include/linux/backing-dev-defs.h return percpu_ref_is_dying(&wb->refcnt);
wb 310 include/linux/backing-dev-defs.h static inline bool wb_tryget(struct bdi_writeback *wb)
wb 315 include/linux/backing-dev-defs.h static inline void wb_get(struct bdi_writeback *wb)
wb 319 include/linux/backing-dev-defs.h static inline void wb_put(struct bdi_writeback *wb)
wb 323 include/linux/backing-dev-defs.h static inline bool wb_dying(struct bdi_writeback *wb)
wb 45 include/linux/backing-dev.h void wb_start_background_writeback(struct bdi_writeback *wb);
wb 47 include/linux/backing-dev.h void wb_wakeup_delayed(struct bdi_writeback *wb);
wb 57 include/linux/backing-dev.h static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
wb 59 include/linux/backing-dev.h return test_bit(WB_has_dirty_io, &wb->state);
wb 71 include/linux/backing-dev.h static inline void __add_wb_stat(struct bdi_writeback *wb,
wb 74 include/linux/backing-dev.h percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
wb 77 include/linux/backing-dev.h static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
wb 79 include/linux/backing-dev.h __add_wb_stat(wb, item, 1);
wb 82 include/linux/backing-dev.h static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
wb 84 include/linux/backing-dev.h __add_wb_stat(wb, item, -1);
wb 87 include/linux/backing-dev.h static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
wb 89 include/linux/backing-dev.h return percpu_counter_read_positive(&wb->stat[item]);
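
Note: the include/linux/backing-dev.h hits just above are the per-writeback statistics helpers: inc_wb_stat()/dec_wb_stat() funnel into __add_wb_stat(), which updates a per-CPU counter, and wb_stat() reads an approximate non-negative total. The sketch below is a userspace model with a plain per-CPU array standing in for percpu_counter; the batching (WB_STAT_BATCH) and the real API are deliberately omitted.

    #include <stdio.h>

    enum wb_stat_item { WB_RECLAIMABLE, WB_WRITEBACK, WB_DIRTIED, WB_WRITTEN, NR_WB_STAT_ITEMS };
    #define NCPU 4

    struct wb_model { long stat[NR_WB_STAT_ITEMS][NCPU]; };

    /* Like __add_wb_stat(): update this CPU's slot only, no shared cacheline. */
    static void add_wb_stat(struct wb_model *wb, enum wb_stat_item item, long amount, int cpu)
    {
        wb->stat[item][cpu] += amount;
    }

    /* Approximate read, clamped at zero like percpu_counter_read_positive(). */
    static long wb_stat_model(struct wb_model *wb, enum wb_stat_item item)
    {
        long sum = 0;
        for (int cpu = 0; cpu < NCPU; cpu++)
            sum += wb->stat[item][cpu];
        return sum > 0 ? sum : 0;
    }

    int main(void)
    {
        struct wb_model wb = { { { 0 } } };
        add_wb_stat(&wb, WB_WRITEBACK, 1, 0);    /* inc_wb_stat() on CPU 0 */
        add_wb_stat(&wb, WB_WRITEBACK, -1, 1);   /* dec_wb_stat() on CPU 1 */
        printf("WB_WRITEBACK = %ld\n", wb_stat_model(&wb, WB_WRITEBACK));
        return 0;
    }
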
wb 92 include/linux/backing-dev.h static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
wb 94 include/linux/backing-dev.h return percpu_counter_sum_positive(&wb->stat[item]);
wb 97 include/linux/backing-dev.h extern void wb_writeout_inc(struct bdi_writeback *wb);
wb 154 include/linux/backing-dev.h static inline bool writeback_in_progress(struct bdi_writeback *wb)
wb 156 include/linux/backing-dev.h return test_bit(WB_writeback_running, &wb->state);
wb 174 include/linux/backing-dev.h static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
wb 176 include/linux/backing-dev.h struct backing_dev_info *bdi = wb->bdi;
wb 180 include/linux/backing-dev.h return wb->congested->state & cong_bits;
wb 276 include/linux/backing-dev.h struct bdi_writeback *wb;
wb 280 include/linux/backing-dev.h return &bdi->wb;
wb 282 include/linux/backing-dev.h wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
wb 288 include/linux/backing-dev.h if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
wb 289 include/linux/backing-dev.h return wb;
wb 305 include/linux/backing-dev.h struct bdi_writeback *wb;
wb 308 include/linux/backing-dev.h wb = wb_find_current(bdi);
wb 309 include/linux/backing-dev.h if (wb && unlikely(!wb_tryget(wb)))
wb 310 include/linux/backing-dev.h wb = NULL;
wb 313 include/linux/backing-dev.h if (unlikely(!wb)) {
wb 317 include/linux/backing-dev.h wb = wb_get_create(bdi, memcg_css, gfp);
wb 320 include/linux/backing-dev.h return wb;
wb 426 include/linux/backing-dev.h return &bdi->wb;
wb 432 include/linux/backing-dev.h return &bdi->wb;
wb 442 include/linux/backing-dev.h return &inode_to_bdi(inode)->wb;
wb 466 include/linux/backing-dev.h return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
wb 489 include/linux/backing-dev.h return wb_congested(&bdi->wb, cong_bits);
wb 1289 include/linux/memcontrol.h struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
wb 1290 include/linux/memcontrol.h void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
wb 1295 include/linux/memcontrol.h struct bdi_writeback *wb);
wb 1298 include/linux/memcontrol.h struct bdi_writeback *wb)
wb 1303 include/linux/memcontrol.h if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
wb 1304 include/linux/memcontrol.h mem_cgroup_track_foreign_dirty_slowpath(page, wb);
wb 1307 include/linux/memcontrol.h void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
wb 1311 include/linux/memcontrol.h static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
wb 1316 include/linux/memcontrol.h static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
wb 1325 include/linux/memcontrol.h struct bdi_writeback *wb)
wb 1329 include/linux/memcontrol.h static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
wb 1610 include/linux/mm.h struct bdi_writeback *wb);
wb 84 include/linux/writeback.h struct bdi_writeback *wb; /* wb this writeback is issued under */
wb 116 include/linux/writeback.h if (wbc->wb)
wb 117 include/linux/writeback.h return wbc->wb->blkcg_css;
wb 289 include/linux/writeback.h if (wbc->wb)
wb 290 include/linux/writeback.h bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
wb 385 include/linux/writeback.h unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
wb 387 include/linux/writeback.h void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
wb 389 include/linux/writeback.h bool wb_over_bg_thresh(struct bdi_writeback *wb);
wb 152 include/trace/events/writeback.h static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
wb 154 include/trace/events/writeback.h return wb->memcg_css->cgroup->kn->id.ino;
wb 159 include/trace/events/writeback.h if (wbc->wb)
wb 160 include/trace/events/writeback.h return __trace_wb_assign_cgroup(wbc->wb);
wb 166 include/trace/events/writeback.h static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
wb 240 include/trace/events/writeback.h TP_PROTO(struct page *page, struct bdi_writeback *wb),
wb 242 include/trace/events/writeback.h TP_ARGS(page, wb),
wb 257 include/trace/events/writeback.h strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
wb 258 include/trace/events/writeback.h __entry->bdi_id = wb->bdi->id;
wb 260 include/trace/events/writeback.h __entry->memcg_id = wb->memcg_css->id;
wb 261 include/trace/events/writeback.h __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
wb 277 include/trace/events/writeback.h TP_PROTO(struct bdi_writeback *wb, unsigned int frn_bdi_id,
wb 280 include/trace/events/writeback.h TP_ARGS(wb, frn_bdi_id, frn_memcg_id),
wb 290 include/trace/events/writeback.h strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
wb 291 include/trace/events/writeback.h __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
wb 349 include/trace/events/writeback.h TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work),
wb 350 include/trace/events/writeback.h TP_ARGS(wb, work),
wb 363 include/trace/events/writeback.h strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
wb 371 include/trace/events/writeback.h __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
wb 388 include/trace/events/writeback.h TP_PROTO(struct bdi_writeback *wb, struct wb_writeback_work *work), \
wb 389 include/trace/events/writeback.h TP_ARGS(wb, work))
wb 409 include/trace/events/writeback.h TP_PROTO(struct bdi_writeback *wb),
wb 410 include/trace/events/writeback.h TP_ARGS(wb),
wb 416 include/trace/events/writeback.h strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
wb 417 include/trace/events/writeback.h __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
wb 426 include/trace/events/writeback.h TP_PROTO(struct bdi_writeback *wb), \
wb 427 include/trace/events/writeback.h TP_ARGS(wb))
wb 500 include/trace/events/writeback.h TP_PROTO(struct bdi_writeback *wb,
wb 503 include/trace/events/writeback.h TP_ARGS(wb, work, moved),
wb 514 include/trace/events/writeback.h strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
wb 520 include/trace/events/writeback.h __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
wb 582 include/trace/events/writeback.h TP_PROTO(struct bdi_writeback *wb,
wb 586 include/trace/events/writeback.h TP_ARGS(wb, dirty_rate, task_ratelimit),
wb 600 include/trace/events/writeback.h strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
wb 601 include/trace/events/writeback.h __entry->write_bw = KBps(wb->write_bandwidth);
wb 602 include/trace/events/writeback.h __entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
wb 604 include/trace/events/writeback.h __entry->dirty_ratelimit = KBps(wb->dirty_ratelimit);
wb 607 include/trace/events/writeback.h KBps(wb->balanced_dirty_ratelimit);
wb 608 include/trace/events/writeback.h __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
wb 628 include/trace/events/writeback.h TP_PROTO(struct bdi_writeback *wb,
wb 641 include/trace/events/writeback.h TP_ARGS(wb, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
wb 665 include/trace/events/writeback.h strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
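
Note: several hits above (include/linux/writeback.h and the trace events) show struct writeback_control caching the bdi_writeback it is issued under in wbc->wb, so each bio can be tagged with that wb's block cgroup via bio_associate_blkg_from_css(). A minimal model of that ownership hand-off follows; all struct layouts here are invented stand-ins for the kernel types, kept only to show the shape of the association.

    #include <stdio.h>

    struct css { int id; };
    struct wb_model { struct css *blkcg_css; };
    struct wbc_model { struct wb_model *wb; };
    struct bio_model { struct css *blkg_css; };

    /* Tag the I/O unit with the issuing wb's cgroup, as wbc_init_bio() does. */
    static void wbc_init_bio_model(struct wbc_model *wbc, struct bio_model *bio)
    {
        if (wbc->wb)                  /* only when cgroup writeback is active */
            bio->blkg_css = wbc->wb->blkcg_css;
    }

    int main(void)
    {
        struct css blkcg = { .id = 7 };
        struct wb_model wb = { .blkcg_css = &blkcg };
        struct wbc_model wbc = { .wb = &wb };
        struct bio_model bio = { 0 };

        wbc_init_bio_model(&wbc, &bio);
        printf("bio charged to blkcg %d\n", bio.blkg_css ? bio.blkg_css->id : -1);
        return 0;
    }
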
wb 683 include/trace/events/writeback.h __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
wb 77 include/uapi/drm/lima_drm.h __u32 wb[3 * LIMA_PP_WB_REG_NUM];
wb 86 include/uapi/drm/lima_drm.h __u32 wb[3 * LIMA_PP_WB_REG_NUM];
wb 52 mm/backing-dev.c struct bdi_writeback *wb = &bdi->wb;
wb 60 mm/backing-dev.c spin_lock(&wb->list_lock);
wb 61 mm/backing-dev.c list_for_each_entry(inode, &wb->b_dirty, i_io_list)
wb 63 mm/backing-dev.c list_for_each_entry(inode, &wb->b_io, i_io_list)
wb 65 mm/backing-dev.c list_for_each_entry(inode, &wb->b_more_io, i_io_list)
wb 67 mm/backing-dev.c list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
wb 70 mm/backing-dev.c spin_unlock(&wb->list_lock);
wb 73 mm/backing-dev.c wb_thresh = wb_calc_thresh(wb, dirty_thresh);
wb 91 mm/backing-dev.c (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
wb 92 mm/backing-dev.c (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
wb 96 mm/backing-dev.c (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
wb 97 mm/backing-dev.c (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
wb 98 mm/backing-dev.c (unsigned long) K(wb->write_bandwidth),
wb 103 mm/backing-dev.c !list_empty(&bdi->bdi_list), bdi->wb.state);
wb 268 mm/backing-dev.c void wb_wakeup_delayed(struct bdi_writeback *wb)
wb 273 mm/backing-dev.c spin_lock_bh(&wb->work_lock);
wb 274 mm/backing-dev.c if (test_bit(WB_registered, &wb->state))
wb 275 mm/backing-dev.c queue_delayed_work(bdi_wq, &wb->dwork, timeout);
wb 276 mm/backing-dev.c spin_unlock_bh(&wb->work_lock);
wb 284 mm/backing-dev.c static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
wb 289 mm/backing-dev.c memset(wb, 0, sizeof(*wb));
wb 291 mm/backing-dev.c if (wb != &bdi->wb)
wb 293 mm/backing-dev.c wb->bdi = bdi;
wb 294 mm/backing-dev.c wb->last_old_flush = jiffies;
wb 295 mm/backing-dev.c INIT_LIST_HEAD(&wb->b_dirty);
wb 296 mm/backing-dev.c INIT_LIST_HEAD(&wb->b_io);
wb 297 mm/backing-dev.c INIT_LIST_HEAD(&wb->b_more_io);
wb 298 mm/backing-dev.c INIT_LIST_HEAD(&wb->b_dirty_time);
wb 299 mm/backing-dev.c spin_lock_init(&wb->list_lock);
wb 301 mm/backing-dev.c wb->bw_time_stamp = jiffies;
wb 302 mm/backing-dev.c wb->balanced_dirty_ratelimit = INIT_BW;
wb 303 mm/backing-dev.c wb->dirty_ratelimit = INIT_BW;
wb 304 mm/backing-dev.c wb->write_bandwidth = INIT_BW;
wb 305 mm/backing-dev.c wb->avg_write_bandwidth = INIT_BW;
wb 307 mm/backing-dev.c spin_lock_init(&wb->work_lock);
wb 308 mm/backing-dev.c INIT_LIST_HEAD(&wb->work_list);
wb 309 mm/backing-dev.c INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
wb 310 mm/backing-dev.c wb->dirty_sleep = jiffies;
wb 312 mm/backing-dev.c wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
wb 313 mm/backing-dev.c if (!wb->congested) {
wb 318 mm/backing-dev.c err = fprop_local_init_percpu(&wb->completions, gfp);
wb 323 mm/backing-dev.c err = percpu_counter_init(&wb->stat[i], 0, gfp);
wb 332 mm/backing-dev.c percpu_counter_destroy(&wb->stat[i]);
wb 333 mm/backing-dev.c fprop_local_destroy_percpu(&wb->completions);
wb 335 mm/backing-dev.c wb_congested_put(wb->congested);
wb 337 mm/backing-dev.c if (wb != &bdi->wb)
wb 342 mm/backing-dev.c static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
wb 347 mm/backing-dev.c static void wb_shutdown(struct bdi_writeback *wb)
wb 350 mm/backing-dev.c spin_lock_bh(&wb->work_lock);
wb 351 mm/backing-dev.c if (!test_and_clear_bit(WB_registered, &wb->state)) {
wb 352 mm/backing-dev.c spin_unlock_bh(&wb->work_lock);
wb 355 mm/backing-dev.c spin_unlock_bh(&wb->work_lock);
wb 357 mm/backing-dev.c cgwb_remove_from_bdi_list(wb);
wb 363 mm/backing-dev.c mod_delayed_work(bdi_wq, &wb->dwork, 0);
wb 364 mm/backing-dev.c flush_delayed_work(&wb->dwork);
wb 365 mm/backing-dev.c WARN_ON(!list_empty(&wb->work_list));
wb 368 mm/backing-dev.c static void wb_exit(struct bdi_writeback *wb)
wb 372 mm/backing-dev.c WARN_ON(delayed_work_pending(&wb->dwork));
wb 375 mm/backing-dev.c percpu_counter_destroy(&wb->stat[i]);
wb 377 mm/backing-dev.c fprop_local_destroy_percpu(&wb->completions);
wb 378 mm/backing-dev.c wb_congested_put(wb->congested);
wb 379 mm/backing-dev.c if (wb != &wb->bdi->wb)
wb 380 mm/backing-dev.c bdi_put(wb->bdi);
wb 483 mm/backing-dev.c struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
wb 485 mm/backing-dev.c struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);
wb 487 mm/backing-dev.c mutex_lock(&wb->bdi->cgwb_release_mutex);
wb 488 mm/backing-dev.c wb_shutdown(wb);
wb 490 mm/backing-dev.c css_put(wb->memcg_css);
wb 491 mm/backing-dev.c css_put(wb->blkcg_css);
wb 492 mm/backing-dev.c mutex_unlock(&wb->bdi->cgwb_release_mutex);
wb 497 mm/backing-dev.c fprop_local_destroy_percpu(&wb->memcg_completions);
wb 498 mm/backing-dev.c percpu_ref_exit(&wb->refcnt);
wb 499 mm/backing-dev.c wb_exit(wb);
wb 500 mm/backing-dev.c kfree_rcu(wb, rcu);
wb 505 mm/backing-dev.c struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
wb 507 mm/backing-dev.c queue_work(cgwb_release_wq, &wb->release_work);
wb 510 mm/backing-dev.c static void cgwb_kill(struct bdi_writeback *wb)
wb 514 mm/backing-dev.c WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
wb 515 mm/backing-dev.c list_del(&wb->memcg_node);
wb 516 mm/backing-dev.c list_del(&wb->blkcg_node);
wb 517 mm/backing-dev.c percpu_ref_kill(&wb->refcnt);
wb 520 mm/backing-dev.c static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
wb 523 mm/backing-dev.c list_del_rcu(&wb->bdi_node);
wb 534 mm/backing-dev.c struct bdi_writeback *wb;
wb 546 mm/backing-dev.c wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
wb 547 mm/backing-dev.c if (wb && wb->blkcg_css != blkcg_css) {
wb 548 mm/backing-dev.c cgwb_kill(wb);
wb 549 mm/backing-dev.c wb = NULL;
wb 552 mm/backing-dev.c if (wb)
wb 556 mm/backing-dev.c wb = kmalloc(sizeof(*wb), gfp);
wb 557 mm/backing-dev.c if (!wb) {
wb 562 mm/backing-dev.c ret = wb_init(wb, bdi, blkcg_css->id, gfp);
wb 566 mm/backing-dev.c ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
wb 570 mm/backing-dev.c ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
wb 574 mm/backing-dev.c wb->memcg_css = memcg_css;
wb 575 mm/backing-dev.c wb->blkcg_css = blkcg_css;
wb 576 mm/backing-dev.c INIT_WORK(&wb->release_work, cgwb_release_workfn);
wb 577 mm/backing-dev.c set_bit(WB_registered, &wb->state);
wb 587 mm/backing-dev.c if (test_bit(WB_registered, &bdi->wb.state) &&
wb 590 mm/backing-dev.c ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
wb 592 mm/backing-dev.c list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
wb 593 mm/backing-dev.c list_add(&wb->memcg_node, memcg_cgwb_list);
wb 594 mm/backing-dev.c list_add(&wb->blkcg_node, blkcg_cgwb_list);
wb 609 mm/backing-dev.c fprop_local_destroy_percpu(&wb->memcg_completions);
wb 611 mm/backing-dev.c percpu_ref_exit(&wb->refcnt);
wb 613 mm/backing-dev.c wb_exit(wb);
wb 615 mm/backing-dev.c kfree(wb);
wb 647 mm/backing-dev.c struct bdi_writeback *wb;
wb 650 mm/backing-dev.c return &bdi->wb;
wb 653 mm/backing-dev.c wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
wb 654 mm/backing-dev.c if (wb) {
wb 659 mm/backing-dev.c if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
wb 660 mm/backing-dev.c wb = NULL;
wb 665 mm/backing-dev.c return wb;
wb 681 mm/backing-dev.c struct bdi_writeback *wb;
wb 686 mm/backing-dev.c return &bdi->wb;
wb 689 mm/backing-dev.c wb = wb_get_lookup(bdi, memcg_css);
wb 690 mm/backing-dev.c } while (!wb && !cgwb_create(bdi, memcg_css, gfp));
wb 692 mm/backing-dev.c return wb;
wb 704 mm/backing-dev.c ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
wb 706 mm/backing-dev.c bdi->wb.memcg_css = &root_mem_cgroup->css;
wb 707 mm/backing-dev.c bdi->wb.blkcg_css = blkcg_root_css;
wb 716 mm/backing-dev.c struct bdi_writeback *wb;
wb 718 mm/backing-dev.c WARN_ON(test_bit(WB_registered, &bdi->wb.state));
wb 728 mm/backing-dev.c wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
wb 731 mm/backing-dev.c wb_shutdown(wb);
wb 747 mm/backing-dev.c struct bdi_writeback *wb, *next;
wb 750 mm/backing-dev.c list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
wb 751 mm/backing-dev.c cgwb_kill(wb);
wb 764 mm/backing-dev.c struct bdi_writeback *wb, *next;
wb 767 mm/backing-dev.c list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
wb 768 mm/backing-dev.c cgwb_kill(wb);
wb 791 mm/backing-dev.c list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
wb 822 mm/backing-dev.c err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
wb 839 mm/backing-dev.c list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
wb 842 mm/backing-dev.c static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
wb 844 mm/backing-dev.c list_del_rcu(&wb->bdi_node);
wb 950 mm/backing-dev.c set_bit(WB_registered, &bdi->wb.state);
wb 1013 mm/backing-dev.c wb_shutdown(&bdi->wb);
wb 1033 mm/backing-dev.c if (test_bit(WB_registered, &bdi->wb.state))
wb 1036 mm/backing-dev.c wb_exit(&bdi->wb);
wb 4394 mm/memcontrol.c struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
wb 4396 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
wb 4438 mm/memcontrol.c void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
wb 4442 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
wb 4507 mm/memcontrol.c struct bdi_writeback *wb)
wb 4516 mm/memcontrol.c trace_track_foreign_dirty(page, wb);
wb 4525 mm/memcontrol.c if (frn->bdi_id == wb->bdi->id &&
wb 4526 mm/memcontrol.c frn->memcg_id == wb->memcg_css->id)
wb 4552 mm/memcontrol.c frn->bdi_id = wb->bdi->id;
wb 4553 mm/memcontrol.c frn->memcg_id = wb->memcg_css->id;
wb 4559 mm/memcontrol.c void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
wb 4561 mm/memcontrol.c struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
wb 4578 mm/memcontrol.c trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
wb 134 mm/page-writeback.c struct bdi_writeback *wb;
wb 158 mm/page-writeback.c #define GDTC_INIT(__wb) .wb = (__wb), \
wb 164 mm/page-writeback.c #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
wb 184 mm/page-writeback.c static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
wb 186 mm/page-writeback.c return &wb->memcg_completions;
wb 189 mm/page-writeback.c static void wb_min_max_ratio(struct bdi_writeback *wb,
wb 192 mm/page-writeback.c unsigned long this_bw = wb->avg_write_bandwidth;
wb 193 mm/page-writeback.c unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
wb 194 mm/page-writeback.c unsigned long long min = wb->bdi->min_ratio;
wb 195 mm/page-writeback.c unsigned long long max = wb->bdi->max_ratio;
wb 218 mm/page-writeback.c #define GDTC_INIT(__wb) .wb = (__wb), \
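
Note: the mm/backing-dev.c hits above implement lookup-or-create for per-cgroup writeback structs: wb_get_create() keeps retrying wb_get_lookup() until a wb exists or cgwb_create() fails. The sketch below compresses that retry shape into runnable userspace C, with a plain array standing in for the radix tree keyed by memcg css id; the real code also handles blkcg mismatches, refcounts and RCU, all omitted here.

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_CSS 32
    static void *cgwb_tree[MAX_CSS];       /* stand-in for bdi->cgwb_tree */

    static void *wb_get_lookup_model(int memcg_id)
    {
        return cgwb_tree[memcg_id];
    }

    /* Returns 0 on success, like the kernel's cgwb_create(). */
    static int cgwb_create_model(int memcg_id)
    {
        void *wb = malloc(16);             /* wb_init() + insert, simplified */
        if (!wb)
            return -1;
        cgwb_tree[memcg_id] = wb;          /* radix_tree_insert() in the kernel */
        return 0;
    }

    static void *wb_get_create_model(int memcg_id)
    {
        void *wb;
        do {
            wb = wb_get_lookup_model(memcg_id);
        } while (!wb && !cgwb_create_model(memcg_id));  /* retry after create */
        return wb;
    }

    int main(void)
    {
        printf("wb=%p\n", wb_get_create_model(3));
        return 0;
    }
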
wb 238 mm/page-writeback.c static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
wb 243 mm/page-writeback.c static void wb_min_max_ratio(struct bdi_writeback *wb,
wb 246 mm/page-writeback.c *minp = wb->bdi->min_ratio;
wb 247 mm/page-writeback.c *maxp = wb->bdi->max_ratio;
wb 600 mm/page-writeback.c static inline void __wb_writeout_inc(struct bdi_writeback *wb)
wb 604 mm/page-writeback.c inc_wb_stat(wb, WB_WRITTEN);
wb 605 mm/page-writeback.c wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
wb 606 mm/page-writeback.c wb->bdi->max_prop_frac);
wb 608 mm/page-writeback.c cgdom = mem_cgroup_wb_domain(wb);
wb 610 mm/page-writeback.c wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb),
wb 611 mm/page-writeback.c wb->bdi->max_prop_frac);
wb 614 mm/page-writeback.c void wb_writeout_inc(struct bdi_writeback *wb)
wb 619 mm/page-writeback.c __wb_writeout_inc(wb);
wb 782 mm/page-writeback.c wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
wb 791 mm/page-writeback.c unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
wb 793 mm/page-writeback.c struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
wb 906 mm/page-writeback.c struct bdi_writeback *wb = dtc->wb;
wb 907 mm/page-writeback.c unsigned long write_bw = wb->avg_write_bandwidth;
wb 956 mm/page-writeback.c if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
wb 1083 mm/page-writeback.c static void wb_update_write_bandwidth(struct bdi_writeback *wb,
wb 1088 mm/page-writeback.c unsigned long avg = wb->avg_write_bandwidth;
wb 1089 mm/page-writeback.c unsigned long old = wb->write_bandwidth;
wb 1102 mm/page-writeback.c bw = written - min(written, wb->written_stamp);
wb 1109 mm/page-writeback.c bw += (u64)wb->write_bandwidth * (period - elapsed);
wb 1124 mm/page-writeback.c if (wb_has_dirty_io(wb)) {
wb 1125 mm/page-writeback.c long delta = avg - wb->avg_write_bandwidth;
wb 1127 mm/page-writeback.c &wb->bdi->tot_write_bandwidth) <= 0);
wb 1129 mm/page-writeback.c wb->write_bandwidth = bw;
wb 1130 mm/page-writeback.c wb->avg_write_bandwidth = avg;
wb 1191 mm/page-writeback.c struct bdi_writeback *wb = dtc->wb;
wb 1196 mm/page-writeback.c unsigned long write_bw = wb->avg_write_bandwidth;
wb 1197 mm/page-writeback.c unsigned long dirty_ratelimit = wb->dirty_ratelimit;
wb 1209 mm/page-writeback.c dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
wb 1303 mm/page-writeback.c if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
wb 1312 mm/page-writeback.c x = min3(wb->balanced_dirty_ratelimit,
wb 1317 mm/page-writeback.c x = max3(wb->balanced_dirty_ratelimit,
wb 1339 mm/page-writeback.c wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
wb 1340 mm/page-writeback.c wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
wb 1342 mm/page-writeback.c trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
wb 1350 mm/page-writeback.c struct bdi_writeback *wb = gdtc->wb;
wb 1352 mm/page-writeback.c unsigned long elapsed = now - wb->bw_time_stamp;
wb 1356 mm/page-writeback.c lockdep_assert_held(&wb->list_lock);
wb 1364 mm/page-writeback.c dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
wb 1365 mm/page-writeback.c written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
wb 1371 mm/page-writeback.c if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
wb 1387 mm/page-writeback.c wb_update_write_bandwidth(wb, elapsed, written);
wb 1390 mm/page-writeback.c wb->dirtied_stamp = dirtied;
wb 1391 mm/page-writeback.c wb->written_stamp = written;
wb 1392 mm/page-writeback.c wb->bw_time_stamp = now;
wb 1395 mm/page-writeback.c void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
wb 1397 mm/page-writeback.c struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
wb 1419 mm/page-writeback.c static unsigned long wb_max_pause(struct bdi_writeback *wb,
wb 1422 mm/page-writeback.c unsigned long bw = wb->avg_write_bandwidth;
wb 1438 mm/page-writeback.c static long wb_min_pause(struct bdi_writeback *wb,
wb 1444 mm/page-writeback.c long hi = ilog2(wb->avg_write_bandwidth);
wb 1445 mm/page-writeback.c long lo = ilog2(wb->dirty_ratelimit);
wb 1515 mm/page-writeback.c struct bdi_writeback *wb = dtc->wb;
wb 1546 mm/page-writeback.c wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
wb 1547 mm/page-writeback.c dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
wb 1549 mm/page-writeback.c wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
wb 1550 mm/page-writeback.c dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
wb 1561 mm/page-writeback.c static void balance_dirty_pages(struct bdi_writeback *wb,
wb 1564 mm/page-writeback.c struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
wb 1565 mm/page-writeback.c struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
wb 1579 mm/page-writeback.c struct backing_dev_info *bdi = wb->bdi;
wb 1622 mm/page-writeback.c mem_cgroup_wb_stats(wb, &filepages, &headroom,
wb 1667 mm/page-writeback.c if (unlikely(!writeback_in_progress(wb)))
wb 1668 mm/page-writeback.c wb_start_background_writeback(wb);
wb 1670 mm/page-writeback.c mem_cgroup_flush_foreign(wb);
wb 1703 mm/page-writeback.c if (dirty_exceeded && !wb->dirty_exceeded)
wb 1704 mm/page-writeback.c wb->dirty_exceeded = 1;
wb 1706 mm/page-writeback.c if (time_is_before_jiffies(wb->bw_time_stamp +
wb 1708 mm/page-writeback.c spin_lock(&wb->list_lock);
wb 1710 mm/page-writeback.c spin_unlock(&wb->list_lock);
wb 1714 mm/page-writeback.c dirty_ratelimit = wb->dirty_ratelimit;
wb 1717 mm/page-writeback.c max_pause = wb_max_pause(wb, sdtc->wb_dirty);
wb 1718 mm/page-writeback.c min_pause = wb_min_pause(wb, max_pause,
wb 1739 mm/page-writeback.c trace_balance_dirty_pages(wb,
wb 1768 mm/page-writeback.c trace_balance_dirty_pages(wb,
wb 1781 mm/page-writeback.c wb->dirty_sleep = now;
wb 1812 mm/page-writeback.c if (!dirty_exceeded && wb->dirty_exceeded)
wb 1813 mm/page-writeback.c wb->dirty_exceeded = 0;
wb 1815 mm/page-writeback.c if (writeback_in_progress(wb))
wb 1830 mm/page-writeback.c wb_start_background_writeback(wb);
wb 1868 mm/page-writeback.c struct bdi_writeback *wb = NULL;
wb 1876 mm/page-writeback.c wb = wb_get_create_current(bdi, GFP_KERNEL);
wb 1877 mm/page-writeback.c if (!wb)
wb 1878 mm/page-writeback.c wb = &bdi->wb;
wb 1881 mm/page-writeback.c if (wb->dirty_exceeded)
wb 1913 mm/page-writeback.c balance_dirty_pages(wb, current->nr_dirtied);
wb 1915 mm/page-writeback.c wb_put(wb);
wb 1928 mm/page-writeback.c bool wb_over_bg_thresh(struct bdi_writeback *wb)
wb 1930 mm/page-writeback.c struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
wb 1931 mm/page-writeback.c struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
wb 1948 mm/page-writeback.c if (wb_stat(wb, WB_RECLAIMABLE) >
wb 1949 mm/page-writeback.c wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
wb 1955 mm/page-writeback.c mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
wb 1963 mm/page-writeback.c if (wb_stat(wb, WB_RECLAIMABLE) >
wb 1964 mm/page-writeback.c wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
wb 2419 mm/page-writeback.c struct bdi_writeback *wb;
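
Note: wb_update_write_bandwidth(), listed above, re-estimates a device's write bandwidth from pages written per elapsed jiffies and folds each sample into wb->avg_write_bandwidth. A rough userspace rendering of that estimator follows; the smoothing constant and scaling are simplified stand-ins for the kernel's period-blended math, kept only to show the shape of the feedback.

    #include <stdio.h>

    #define HZ 100                            /* ticks per second, for the example */

    static unsigned long write_bandwidth = 100;      /* pages per second */
    static unsigned long avg_write_bandwidth = 100;

    static void update_write_bandwidth(unsigned long elapsed, unsigned long pages_written)
    {
        /* instantaneous rate over the sampling window */
        unsigned long long bw =
            (unsigned long long)pages_written * HZ / (elapsed ? elapsed : 1);

        /* nudge the long-term average toward the latest sample */
        if (avg_write_bandwidth > bw)
            avg_write_bandwidth -= (avg_write_bandwidth - bw) / 8;
        else
            avg_write_bandwidth += (bw - avg_write_bandwidth) / 8;

        write_bandwidth = (unsigned long)bw;
    }

    int main(void)
    {
        update_write_bandwidth(200, 400);     /* 400 pages over 2 seconds */
        printf("bw=%lu avg=%lu\n", write_bandwidth, avg_write_bandwidth);
        return 0;
    }
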
wb 2422 mm/page-writeback.c wb = inode_to_wb(inode);
wb 2427 mm/page-writeback.c inc_wb_stat(wb, WB_RECLAIMABLE);
wb 2428 mm/page-writeback.c inc_wb_stat(wb, WB_DIRTIED);
wb 2433 mm/page-writeback.c mem_cgroup_track_foreign_dirty(page, wb);
wb 2443 mm/page-writeback.c struct bdi_writeback *wb)
wb 2448 mm/page-writeback.c dec_wb_stat(wb, WB_RECLAIMABLE);
wb 2510 mm/page-writeback.c struct bdi_writeback *wb;
wb 2513 mm/page-writeback.c wb = unlocked_inode_to_wb_begin(inode, &cookie);
wb 2516 mm/page-writeback.c dec_wb_stat(wb, WB_DIRTIED);
wb 2622 mm/page-writeback.c struct bdi_writeback *wb;
wb 2626 mm/page-writeback.c wb = unlocked_inode_to_wb_begin(inode, &cookie);
wb 2629 mm/page-writeback.c account_page_cleaned(page, mapping, wb);
wb 2662 mm/page-writeback.c struct bdi_writeback *wb;
wb 2700 mm/page-writeback.c wb = unlocked_inode_to_wb_begin(inode, &cookie);
wb 2704 mm/page-writeback.c dec_wb_stat(wb, WB_RECLAIMABLE);
wb 2734 mm/page-writeback.c struct bdi_writeback *wb = inode_to_wb(inode);
wb 2736 mm/page-writeback.c dec_wb_stat(wb, WB_WRITEBACK);
wb 2737 mm/page-writeback.c __wb_writeout_inc(wb);
wb 72 scripts/extract-cert.c static BIO *wb;
wb 80 scripts/extract-cert.c if (!wb) {
wb 81 scripts/extract-cert.c wb = BIO_new_file(cert_dst, "wb");
wb 82 scripts/extract-cert.c ERR(!wb, "%s", cert_dst);
wb 85 scripts/extract-cert.c ERR(!i2d_X509_bio(wb, x509), "%s", cert_dst);
wb 146 scripts/extract-cert.c if (wb && !x509) {
wb 159 scripts/extract-cert.c BIO_free(wb);
wb 677 scripts/kallsyms.c int wa, wb;
wb 690 scripts/kallsyms.c wb = (sb->sym[0] == 'w') || (sb->sym[0] == 'W');
wb 691 scripts/kallsyms.c if (wa != wb)
wb 692 scripts/kallsyms.c return wa - wb;
wb 696 scripts/kallsyms.c wb = may_be_linker_script_provide_symbol(sb);
wb 697 scripts/kallsyms.c if (wa != wb)
wb 698 scripts/kallsyms.c return wa - wb;
wb 702 scripts/kallsyms.c wb = prefix_underscores_count((const char *)sb->sym + 1);
wb 703 scripts/kallsyms.c if (wa != wb)
wb 704 scripts/kallsyms.c return wa - wb;
wb 617 security/apparmor/match.c #define inc_wb_pos(wb) \
wb 619 security/apparmor/match.c wb->pos = (wb->pos + 1) & (wb->size - 1); \
wb 620 security/apparmor/match.c wb->len = (wb->len + 1) & (wb->size - 1); \
wb 624 security/apparmor/match.c static bool is_loop(struct match_workbuf *wb, unsigned int state,
wb 627 security/apparmor/match.c unsigned int pos = wb->pos;
wb 630 security/apparmor/match.c if (wb->history[pos] < state)
wb 633 security/apparmor/match.c for (i = 0; i <= wb->len; i++) {
wb 634 security/apparmor/match.c if (wb->history[pos] == state) {
wb 639 security/apparmor/match.c pos = wb->size;
wb 648 security/apparmor/match.c const char *str, struct match_workbuf *wb,
wb 659 security/apparmor/match.c AA_BUG(!wb);
wb 674 security/apparmor/match.c wb->history[wb->pos] = state;
wb 680 security/apparmor/match.c if (is_loop(wb, state, &adjust)) {
wb 685 security/apparmor/match.c inc_wb_pos(wb);
wb 693 security/apparmor/match.c wb->history[wb->pos] = state;
wb 699 security/apparmor/match.c if (is_loop(wb, state, &adjust)) {
wb 704 security/apparmor/match.c inc_wb_pos(wb);
wb 731 security/apparmor/match.c DEFINE_MATCH_WB(wb);
wb 735 security/apparmor/match.c return leftmatch_fb(dfa, start, str, &wb, count);
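
Note: the security/apparmor/match.c hits at the end use a small history ring (match_workbuf) to detect DFA state loops: inc_wb_pos() advances position and length with power-of-two masking, and is_loop() walks the recorded states backwards. A self-contained rendering of the same idea follows; the fixed size and the boolean return convention are simplifications of the kernel's macro and helper.

    #include <stdio.h>
    #include <stdbool.h>

    #define WB_SIZE 8                 /* must stay a power of two for the mask */

    struct workbuf {
        unsigned int history[WB_SIZE];
        unsigned int pos, len;
    };

    /* Advance position and length with wraparound, like inc_wb_pos(). */
    static void inc_wb_pos(struct workbuf *wb)
    {
        wb->pos = (wb->pos + 1) & (WB_SIZE - 1);
        wb->len = (wb->len + 1) & (WB_SIZE - 1);
    }

    /* Scan the recorded history backwards for a repeated state. */
    static bool is_loop(struct workbuf *wb, unsigned int state)
    {
        unsigned int pos = wb->pos;

        for (unsigned int i = 0; i <= wb->len; i++) {
            if (wb->history[pos] == state)
                return true;          /* state seen before: a loop */
            if (pos == 0)
                pos = WB_SIZE;
            pos--;
        }
        return false;
    }

    int main(void)
    {
        struct workbuf wb = { { 0 }, 0, 0 };
        wb.history[wb.pos] = 5;
        inc_wb_pos(&wb);
        wb.history[wb.pos] = 9;
        printf("loop on 5? %d\n", is_loop(&wb, 5));
        return 0;
    }
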