shadow 26 arch/alpha/include/asm/mce.h unsigned long shadow[8]; /* Shadow reg. 8-14, 25 */
shadow 459 arch/alpha/kernel/core_mcpcia.c i, i+1, frame->shadow[i],
shadow 460 arch/alpha/kernel/core_mcpcia.c frame->shadow[i+1]);
shadow 137 arch/m68k/fpsp040/fpsp.h .set FPSR_SHADOW,LV-64 | fpsr shadow reg
shadow 19 arch/mips/include/asm/sn/ioc3.h u32 shadow;
shadow 112 arch/powerpc/platforms/ps3/spu.c struct spe_shadow __iomem *shadow;
shadow 124 arch/powerpc/platforms/ps3/spu.c unsigned long problem, unsigned long ls, unsigned long shadow,
shadow 131 arch/powerpc/platforms/ps3/spu.c pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow);
shadow 180 arch/powerpc/platforms/ps3/spu.c iounmap(spu_pdata(spu)->shadow);
shadow 195 arch/powerpc/platforms/ps3/spu.c spu_pdata(spu)->shadow = ioremap_prot(spu_pdata(spu)->shadow_addr,
shadow 197 arch/powerpc/platforms/ps3/spu.c if (!spu_pdata(spu)->shadow) {
shadow 231 arch/powerpc/platforms/ps3/spu.c (unsigned long)spu_pdata(spu)->shadow);
shadow 368 arch/powerpc/platforms/ps3/spu.c while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)
shadow 514 arch/powerpc/platforms/ps3/spu.c return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);
shadow 524 arch/powerpc/platforms/ps3/spu.c return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);
shadow 362 arch/s390/kvm/gaccess.h int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
shadow 310 arch/x86/include/uapi/asm/kvm.h __u8 shadow;
shadow 662 arch/x86/kvm/vmx/nested.c struct vmcs12 *shadow;
shadow 668 arch/x86/kvm/vmx/nested.c shadow = get_shadow_vmcs12(vcpu);
shadow 673 arch/x86/kvm/vmx/nested.c memcpy(shadow, map.hva, VMCS12_SIZE);
shadow 2738 arch/x86/kvm/vmx/nested.c struct vmcs12 *shadow;
shadow 2750 arch/x86/kvm/vmx/nested.c shadow = map.hva;
shadow 2752 arch/x86/kvm/vmx/nested.c if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
shadow 2753 arch/x86/kvm/vmx/nested.c CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
shadow 2502 arch/x86/kvm/vmx/vmx.c struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
shadow 2520 arch/x86/kvm/vmx/vmx.c if (shadow)
shadow 476 arch/x86/kvm/vmx/vmx.h struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
shadow 483 arch/x86/kvm/vmx/vmx.h static inline struct vmcs *alloc_vmcs(bool shadow)
shadow 485 arch/x86/kvm/vmx/vmx.h return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
shadow 3801 arch/x86/kvm/x86.c events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
shadow 3873 arch/x86/kvm/x86.c events->interrupt.shadow);
shadow 519 arch/x86/xen/enlighten_pv.c struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
shadow 524 arch/x86/xen/enlighten_pv.c if (desc_equal(shadow, &t->tls_array[i]))
shadow 527 arch/x86/xen/enlighten_pv.c *shadow = t->tls_array[i];
shadow 1585 block/sed-opal.c struct opal_shadow_mbr *shadow = data;
shadow 1600 block/sed-opal.c if (shadow->size > len || shadow->offset > len - shadow->size) {
shadow 1602 block/sed-opal.c shadow->offset + shadow->size, len);
shadow 1607 block/sed-opal.c src = (u8 __user *)(uintptr_t)shadow->data;
shadow 1608 block/sed-opal.c while (off < shadow->size) {
shadow 1612 block/sed-opal.c add_token_u64(&err, dev, shadow->offset + off);
shadow 1625 block/sed-opal.c (size_t)(shadow->size - off));
shadow 1627 block/sed-opal.c off, len, shadow->size);
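Note: the arch/x86/xen/enlighten_pv.c hits above are the classic compare-before-update use of a shadow: keep a local copy of state whose real home is expensive to write (here, TLS descriptors updated via hypercall) and skip the update when nothing changed. A minimal userspace sketch of that pattern, with made-up types and a hypothetical expensive_update() standing in for the hypercall:

    #include <string.h>

    struct desc { unsigned long a, b; };      /* stand-in for desc_struct */

    static struct desc shadow_tls[3];         /* last descriptor written per slot */

    extern void expensive_update(int slot, const struct desc *d);

    static void load_tls_slot(int slot, const struct desc *want)
    {
        /* Skip the expensive call when the shadow already matches. */
        if (memcmp(&shadow_tls[slot], want, sizeof(*want)) == 0)
            return;
        shadow_tls[slot] = *want;
        expensive_update(slot, want);
    }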
shadow 181 drivers/block/xen-blkfront.c struct blk_shadow shadow[BLK_MAX_RING_SIZE];
shadow 271 drivers/block/xen-blkfront.c rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
shadow 272 drivers/block/xen-blkfront.c rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
shadow 279 drivers/block/xen-blkfront.c if (rinfo->shadow[id].req.u.rw.id != id)
shadow 281 drivers/block/xen-blkfront.c if (rinfo->shadow[id].request == NULL)
shadow 283 drivers/block/xen-blkfront.c rinfo->shadow[id].req.u.rw.id = rinfo->shadow_free;
shadow 284 drivers/block/xen-blkfront.c rinfo->shadow[id].request = NULL;
shadow 535 drivers/block/xen-blkfront.c rinfo->shadow[id].request = req;
shadow 536 drivers/block/xen-blkfront.c rinfo->shadow[id].status = REQ_WAITING;
shadow 537 drivers/block/xen-blkfront.c rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
shadow 563 drivers/block/xen-blkfront.c rinfo->shadow[id].req = *ring_req;
shadow 601 drivers/block/xen-blkfront.c struct blk_shadow *shadow = &rinfo->shadow[setup->id];
shadow 620 drivers/block/xen-blkfront.c shadow->indirect_grants[n] = gnt_list_entry;
shadow 631 drivers/block/xen-blkfront.c shadow->grants_used[setup->grant_idx] = gnt_list_entry;
shadow 742 drivers/block/xen-blkfront.c num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
shadow 745 drivers/block/xen-blkfront.c for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
shadow 752 drivers/block/xen-blkfront.c rinfo->shadow[id].num_sg = num_sg;
shadow 796 drivers/block/xen-blkfront.c rinfo->shadow[extra_id].num_sg = 0;
shadow 801 drivers/block/xen-blkfront.c rinfo->shadow[extra_id].associated_id = id;
shadow 802 drivers/block/xen-blkfront.c rinfo->shadow[id].associated_id = extra_id;
shadow 813 drivers/block/xen-blkfront.c for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
shadow 834 drivers/block/xen-blkfront.c rinfo->shadow[id].req = *ring_req;
shadow 836 drivers/block/xen-blkfront.c rinfo->shadow[extra_id].req = *extra_ring_req;
shadow 1285 drivers/block/xen-blkfront.c if (!rinfo->shadow[i].request)
shadow 1288 drivers/block/xen-blkfront.c segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
shadow 1289 drivers/block/xen-blkfront.c rinfo->shadow[i].req.u.indirect.nr_segments :
shadow 1290 drivers/block/xen-blkfront.c rinfo->shadow[i].req.u.rw.nr_segments;
shadow 1292 drivers/block/xen-blkfront.c persistent_gnt = rinfo->shadow[i].grants_used[j];
shadow 1299 drivers/block/xen-blkfront.c if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
shadow 1307 drivers/block/xen-blkfront.c persistent_gnt = rinfo->shadow[i].indirect_grants[j];
shadow 1314 drivers/block/xen-blkfront.c kvfree(rinfo->shadow[i].grants_used);
shadow 1315 drivers/block/xen-blkfront.c rinfo->shadow[i].grants_used = NULL;
shadow 1316 drivers/block/xen-blkfront.c kvfree(rinfo->shadow[i].indirect_grants);
shadow 1317 drivers/block/xen-blkfront.c rinfo->shadow[i].indirect_grants = NULL;
shadow 1318 drivers/block/xen-blkfront.c kvfree(rinfo->shadow[i].sg);
shadow 1319 drivers/block/xen-blkfront.c rinfo->shadow[i].sg = NULL;
shadow 1427 drivers/block/xen-blkfront.c struct blk_shadow *s = &rinfo->shadow[*id];
shadow 1437 drivers/block/xen-blkfront.c struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
shadow 1582 drivers/block/xen-blkfront.c req = rinfo->shadow[id].request;
shadow 1625 drivers/block/xen-blkfront.c rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
shadow 1880 drivers/block/xen-blkfront.c rinfo->shadow[j].req.u.rw.id = j + 1;
shadow 1881 drivers/block/xen-blkfront.c rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
shadow 2086 drivers/block/xen-blkfront.c struct blk_shadow *shadow = rinfo->shadow;
shadow 2090 drivers/block/xen-blkfront.c if (!shadow[j].request)
shadow 2096 drivers/block/xen-blkfront.c if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
shadow 2097 drivers/block/xen-blkfront.c req_op(shadow[j].request) == REQ_OP_DISCARD ||
shadow 2098 drivers/block/xen-blkfront.c req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
shadow 2099 drivers/block/xen-blkfront.c shadow[j].request->cmd_flags & REQ_FUA) {
shadow 2107 drivers/block/xen-blkfront.c list_add(&shadow[j].request->queuelist, &info->requests);
shadow 2110 drivers/block/xen-blkfront.c merge_bio.head = shadow[j].request->bio;
shadow 2111 drivers/block/xen-blkfront.c merge_bio.tail = shadow[j].request->biotail;
shadow 2113 drivers/block/xen-blkfront.c shadow[j].request->bio = NULL;
shadow 2114 drivers/block/xen-blkfront.c blk_mq_end_request(shadow[j].request, BLK_STS_OK);
shadow 2237 drivers/block/xen-blkfront.c rinfo->shadow[i].grants_used =
shadow 2239 drivers/block/xen-blkfront.c sizeof(rinfo->shadow[i].grants_used[0]),
shadow 2241 drivers/block/xen-blkfront.c rinfo->shadow[i].sg = kvcalloc(psegs,
shadow 2242 drivers/block/xen-blkfront.c sizeof(rinfo->shadow[i].sg[0]),
shadow 2245 drivers/block/xen-blkfront.c rinfo->shadow[i].indirect_grants =
shadow 2247 drivers/block/xen-blkfront.c sizeof(rinfo->shadow[i].indirect_grants[0]),
shadow 2249 drivers/block/xen-blkfront.c if ((rinfo->shadow[i].grants_used == NULL) ||
shadow 2250 drivers/block/xen-blkfront.c (rinfo->shadow[i].sg == NULL) ||
shadow 2252 drivers/block/xen-blkfront.c (rinfo->shadow[i].indirect_grants == NULL)))
shadow 2254 drivers/block/xen-blkfront.c sg_init_table(rinfo->shadow[i].sg, psegs);
shadow 2263 drivers/block/xen-blkfront.c kvfree(rinfo->shadow[i].grants_used);
shadow 2264 drivers/block/xen-blkfront.c rinfo->shadow[i].grants_used = NULL;
shadow 2265 drivers/block/xen-blkfront.c kvfree(rinfo->shadow[i].sg);
shadow 2266 drivers/block/xen-blkfront.c rinfo->shadow[i].sg = NULL;
shadow 2267 drivers/block/xen-blkfront.c kvfree(rinfo->shadow[i].indirect_grants);
shadow 2268 drivers/block/xen-blkfront.c rinfo->shadow[i].indirect_grants = NULL;
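Note: in the drivers/block/xen-blkfront.c hits above, free entries of the shadow ring are threaded into a free list through the otherwise unused req.u.rw.id field (initialised to j + 1 at line 1880, popped at 271/272, pushed back at 283/284). A small userspace sketch of the same idea, with the field renamed next_free and the driver's poison values kept:

    #define RING_SIZE 32

    struct shadow_entry {
        unsigned long next_free;   /* overlays the request-id field */
        void *request;
    };

    static struct shadow_entry shadow[RING_SIZE];
    static unsigned long shadow_free;

    static void shadow_init(void)
    {
        unsigned long j;

        for (j = 0; j < RING_SIZE; j++)
            shadow[j].next_free = j + 1;              /* thread the free list */
        shadow[RING_SIZE - 1].next_free = 0x0fffffff; /* end marker */
        shadow_free = 0;
    }

    static unsigned long shadow_get_id(void)
    {
        unsigned long free = shadow_free;

        shadow_free = shadow[free].next_free;
        shadow[free].next_free = 0x0fffffee;          /* poison: catches reuse */
        return free;
    }

    static void shadow_put_id(unsigned long id)
    {
        shadow[id].next_free = shadow_free;
        shadow[id].request = NULL;
        shadow_free = id;
    }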
shadow 540 drivers/crypto/hisilicon/sec/sec_algs.c void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
shadow 542 drivers/crypto/hisilicon/sec/sec_algs.c struct sec_request *sec_req = shadow;
shadow 708 drivers/crypto/hisilicon/sec/sec_drv.c queue->shadow[queue->expected]);
shadow 709 drivers/crypto/hisilicon/sec/sec_drv.c queue->shadow[queue->expected] = NULL;
shadow 871 drivers/crypto/hisilicon/sec/sec_drv.c queue->shadow[write] = ctx;
shadow 351 drivers/crypto/hisilicon/sec/sec_drv.h void *shadow[SEC_QUEUE_LEN];
shadow 60 drivers/gpio/gpio-janz-ttl.c u8 *shadow;
shadow 64 drivers/gpio/gpio-janz-ttl.c shadow = &mod->porta_shadow;
shadow 66 drivers/gpio/gpio-janz-ttl.c shadow = &mod->portb_shadow;
shadow 69 drivers/gpio/gpio-janz-ttl.c shadow = &mod->portc_shadow;
shadow 74 drivers/gpio/gpio-janz-ttl.c ret = *shadow & BIT(offset);
shadow 83 drivers/gpio/gpio-janz-ttl.c u8 *shadow;
shadow 87 drivers/gpio/gpio-janz-ttl.c shadow = &mod->porta_shadow;
shadow 90 drivers/gpio/gpio-janz-ttl.c shadow = &mod->portb_shadow;
shadow 94 drivers/gpio/gpio-janz-ttl.c shadow = &mod->portc_shadow;
shadow 100 drivers/gpio/gpio-janz-ttl.c *shadow |= BIT(offset);
shadow 102 drivers/gpio/gpio-janz-ttl.c *shadow &= ~BIT(offset);
shadow 104 drivers/gpio/gpio-janz-ttl.c iowrite16be(*shadow, port);
shadow 31 drivers/gpio/gpio-mm-lantiq.c u16 shadow; /* shadow the latches state */
shadow 47 drivers/gpio/gpio-mm-lantiq.c __raw_writew(chip->shadow, chip->mmchip.regs);
shadow 65 drivers/gpio/gpio-mm-lantiq.c chip->shadow |= (1 << offset);
shadow 67 drivers/gpio/gpio-mm-lantiq.c chip->shadow &= ~(1 << offset);
shadow 104 drivers/gpio/gpio-mm-lantiq.c u32 shadow;
shadow 118 drivers/gpio/gpio-mm-lantiq.c if (!of_property_read_u32(pdev->dev.of_node, "lantiq,shadow", &shadow))
shadow 119 drivers/gpio/gpio-mm-lantiq.c chip->shadow = shadow;
shadow 78 drivers/gpio/gpio-stp-xway.c u32 shadow; /* shadow the shift registers state */
shadow 113 drivers/gpio/gpio-stp-xway.c chip->shadow |= BIT(gpio);
shadow 115 drivers/gpio/gpio-stp-xway.c chip->shadow &= ~BIT(gpio);
shadow 116 drivers/gpio/gpio-stp-xway.c xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
shadow 205 drivers/gpio/gpio-stp-xway.c u32 shadow, groups, dsl, phy;
shadow 228 drivers/gpio/gpio-stp-xway.c if (!of_property_read_u32(pdev->dev.of_node, "lantiq,shadow", &shadow))
shadow 229 drivers/gpio/gpio-stp-xway.c chip->shadow = shadow;
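Note: the three GPIO drivers above keep a shadow because their output latches are write-only; reads are answered from the cached copy and every bit flip rewrites the whole latch from it. A sketch of the pattern (port_write() is a hypothetical stand-in for iowrite16be()/__raw_writew()):

    #include <stdint.h>

    extern void port_write(uint16_t val);   /* hypothetical MMIO write */

    static uint16_t port_shadow;            /* mirrors the write-only latch */

    static void port_set(unsigned int offset, int value)
    {
        if (value)
            port_shadow |= (uint16_t)(1u << offset);
        else
            port_shadow &= (uint16_t)~(1u << offset);
        port_write(port_shadow);            /* push the full shadow out */
    }

    static int port_get(unsigned int offset)
    {
        /* The latch cannot be read back; answer from the shadow. */
        return !!(port_shadow & (1u << offset));
    }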
shadow 524 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c if (bo->shadow)
shadow 525 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c r = amdgpu_cs_bo_validate(p, bo->shadow);
shadow 3395 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c struct amdgpu_bo *shadow;
shadow 3405 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
shadow 3408 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
shadow 3409 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
shadow 3410 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
shadow 3413 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c r = amdgpu_bo_restore_shadow(shadow, &next);
shadow 628 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c if (bo->shadow)
shadow 639 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
shadow 641 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c bo->shadow->parent = amdgpu_bo_ref(bo);
shadow 643 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
shadow 740 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
shadow 743 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
shadow 747 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c shadow_addr = amdgpu_bo_gpu_offset(shadow);
shadow 748 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c parent_addr = amdgpu_bo_gpu_offset(shadow->parent);
shadow 751 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c amdgpu_bo_size(shadow), NULL, fence,
shadow 98 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h struct amdgpu_bo *shadow;
shadow 277 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
shadow 623 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (bo->shadow)
shadow 624 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
shadow 749 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (bo->shadow) {
shadow 750 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
shadow 831 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c else if (!vm->root.base.bo || vm->root.base.bo->shadow)
shadow 893 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_bo_unref(&pt->shadow);
shadow 908 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_bo_unref(&entry->base.bo->shadow);
shadow 2509 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (bo->parent && bo->parent->shadow == bo)
shadow 2764 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_bo_unref(&vm->root.base.bo->shadow);
shadow 2892 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_bo_unref(&vm->root.base.bo->shadow);
shadow 44 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c if (table->shadow)
shadow 45 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);
shadow 229 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c if (bo->shadow)
shadow 230 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
shadow 239 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c (bo->shadow ? 2 : 1);
shadow 254 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c if (bo->shadow)
shadow 255 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
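Note: amdgpu pairs each VRAM page-table BO with a system-memory shadow so the tables can be restored after a GPU reset; the amdgpu_vm_sdma.c hits show every PTE update being issued twice, shadow first. A schematic sketch of that write-through (write_ptes() is hypothetical; the driver calls amdgpu_vm_sdma_set_ptes() twice):

    #include <stdint.h>

    struct pt_bo {
        struct pt_bo *shadow;   /* system-memory twin, may be NULL */
    };

    extern void write_ptes(struct pt_bo *bo, uint64_t pe, uint64_t addr,
                           unsigned int count);

    static void pt_set_ptes(struct pt_bo *bo, uint64_t pe, uint64_t addr,
                            unsigned int count)
    {
        if (bo->shadow)
            write_ptes(bo->shadow, pe, addr, count); /* keep the twin in sync */
        write_ptes(bo, pe, addr, count);
        /* costing matches the driver: (bo->shadow ? 2 : 1) commands */
    }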
shadow 2120 drivers/gpu/drm/drm_fb_helper.c void *shadow = NULL;
shadow 2127 drivers/gpu/drm/drm_fb_helper.c shadow = fbi->screen_buffer;
shadow 2133 drivers/gpu/drm/drm_fb_helper.c if (shadow) {
shadow 2134 drivers/gpu/drm/drm_fb_helper.c vfree(shadow);
shadow 2235 drivers/gpu/drm/drm_fb_helper.c void *shadow;
shadow 2242 drivers/gpu/drm/drm_fb_helper.c shadow = vzalloc(fbi->screen_size);
shadow 2243 drivers/gpu/drm/drm_fb_helper.c if (!fbops || !shadow) {
shadow 2245 drivers/gpu/drm/drm_fb_helper.c vfree(shadow);
shadow 2251 drivers/gpu/drm/drm_fb_helper.c fbi->screen_buffer = shadow;
shadow 353 drivers/gpu/drm/exynos/exynos_mixer.c u32 base, shadow;
shadow 365 drivers/gpu/drm/exynos/exynos_mixer.c shadow = mixer_reg_read(ctx, MXR_CFG_S);
shadow 366 drivers/gpu/drm/exynos/exynos_mixer.c if (base != shadow)
shadow 370 drivers/gpu/drm/exynos/exynos_mixer.c shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
shadow 371 drivers/gpu/drm/exynos/exynos_mixer.c if (base != shadow)
shadow 375 drivers/gpu/drm/exynos/exynos_mixer.c shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
shadow 376 drivers/gpu/drm/exynos/exynos_mixer.c if (base != shadow)
shadow 152 drivers/gpu/drm/i915/gvt/gvt.h struct intel_context *shadow[I915_NUM_ENGINES];
shadow 1577 drivers/gpu/drm/i915/gvt/kvmgt.c vgpu->submission.shadow[0]->gem_context->hw_id);
shadow 511 drivers/gpu/drm/i915/gvt/mmio_context.c !is_inhibit_context(s->shadow[ring_id]))
shadow 400 drivers/gpu/drm/i915/gvt/scheduler.c rq = i915_request_create(s->shadow[workload->ring_id]);
shadow 427 drivers/gpu/drm/i915/gvt/scheduler.c if (workload->shadow)
shadow 431 drivers/gpu/drm/i915/gvt/scheduler.c shadow_context_descriptor_update(s->shadow[workload->ring_id],
shadow 444 drivers/gpu/drm/i915/gvt/scheduler.c workload->shadow = true;
shadow 641 drivers/gpu/drm/i915/gvt/scheduler.c set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
shadow 1178 drivers/gpu/drm/i915/gvt/scheduler.c i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
shadow 1180 drivers/gpu/drm/i915/gvt/scheduler.c intel_context_unpin(s->shadow[id]);
shadow 1259 drivers/gpu/drm/i915/gvt/scheduler.c s->shadow[i] = ERR_PTR(-EINVAL);
shadow 1278 drivers/gpu/drm/i915/gvt/scheduler.c s->shadow[i] = ce;
shadow 1305 drivers/gpu/drm/i915/gvt/scheduler.c if (IS_ERR(s->shadow[i]))
shadow 1308 drivers/gpu/drm/i915/gvt/scheduler.c intel_context_unpin(s->shadow[i]);
shadow 1309 drivers/gpu/drm/i915/gvt/scheduler.c intel_context_put(s->shadow[i]);
shadow 86 drivers/gpu/drm/i915/gvt/scheduler.h bool shadow; /* if workload has done shadow of guest request */
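Note: drm_fb_helper's shadow above is a vzalloc()ed system-memory framebuffer that fbdev clients draw into, flushed to the real buffer later; udlfb (further down) exposes the same idea as a module parameter. A userspace sketch using calloc()/memcpy() in place of vzalloc() and the helper's damage handling:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    static uint8_t *shadow_fb;          /* CPU copy clients render into */
    static size_t shadow_size;

    static int shadow_fb_create(size_t screen_size)
    {
        shadow_fb = calloc(1, screen_size);
        if (!shadow_fb)
            return -1;
        shadow_size = screen_size;
        return 0;
    }

    /* Flush one dirty byte range from the shadow to the device memory. */
    static void shadow_fb_flush(uint8_t *hw_fb, size_t off, size_t len)
    {
        memcpy(hw_fb + off, shadow_fb + off, len);
    }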
shadow 40 drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
shadow 39 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto)
shadow 52 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
shadow 110 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c shadow_method(struct nvkm_bios *bios, struct shadow *mthd, const char *name)
shadow 171 drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c struct shadow mthds[] = {
shadow 215 drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c struct mxm_shadow_h *shadow = _mxm_shadow;
shadow 217 drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c nvkm_debug(&mxm->subdev, "checking %s\n", shadow->name);
shadow 218 drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c if (shadow->exec(mxm, version)) {
shadow 224 drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c } while ((++shadow)->name);
shadow 58 drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
shadow 61 drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c if (shadow)
shadow 426 drivers/gpu/drm/qxl/qxl_display.c is_primary = qobj->shadow ? qobj->shadow->is_primary : qobj->is_primary;
shadow 550 drivers/gpu/drm/qxl/qxl_display.c primary = bo->shadow ? bo->shadow : bo;
shadow 807 drivers/gpu/drm/qxl/qxl_display.c if (user_bo->shadow != qdev->dumb_shadow_bo) {
shadow 808 drivers/gpu/drm/qxl/qxl_display.c if (user_bo->shadow) {
shadow 810 drivers/gpu/drm/qxl/qxl_display.c (&user_bo->shadow->tbo.base);
shadow 811 drivers/gpu/drm/qxl/qxl_display.c user_bo->shadow = NULL;
shadow 814 drivers/gpu/drm/qxl/qxl_display.c user_bo->shadow = qdev->dumb_shadow_bo;
shadow 843 drivers/gpu/drm/qxl/qxl_display.c if (old_state->fb != plane->state->fb && user_bo->shadow) {
shadow 844 drivers/gpu/drm/qxl/qxl_display.c drm_gem_object_put_unlocked(&user_bo->shadow->tbo.base);
shadow 845 drivers/gpu/drm/qxl/qxl_display.c user_bo->shadow = NULL;
shadow 90 drivers/gpu/drm/qxl/qxl_drv.h struct qxl_bo *shadow;
shadow 2056 drivers/hv/vmbus_drv.c struct resource *iter, *shadow;
shadow 2092 drivers/hv/vmbus_drv.c shadow = __request_region(iter, start, size, NULL,
shadow 2094 drivers/hv/vmbus_drv.c if (!shadow)
shadow 2099 drivers/hv/vmbus_drv.c shadow->name = (char *)*new;
shadow 439 drivers/infiniband/hw/i40iw/i40iw_verbs.c init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
shadow 1149 drivers/infiniband/hw/i40iw/i40iw_verbs.c info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
shadow 1160 drivers/infiniband/hw/i40iw/i40iw_verbs.c int shadow;
shadow 1164 drivers/infiniband/hw/i40iw/i40iw_verbs.c shadow = I40IW_SHADOW_AREA_SIZE << 3;
shadow 1166 drivers/infiniband/hw/i40iw/i40iw_verbs.c rsize + shadow, 256);
shadow 1466 drivers/infiniband/hw/i40iw/i40iw_verbs.c qpmr->shadow = (dma_addr_t)arr[total];
shadow 1485 drivers/infiniband/hw/i40iw/i40iw_verbs.c cqmr->shadow = (dma_addr_t)arr[total];
shadow 63 drivers/infiniband/hw/i40iw/i40iw_verbs.h dma_addr_t shadow;
shadow 69 drivers/infiniband/hw/i40iw/i40iw_verbs.h dma_addr_t shadow;
shadow 7036 drivers/infiniband/hw/qib/qib_iba7322.c unsigned long shadow = 0;
shadow 7051 drivers/infiniband/hw/qib/qib_iba7322.c shadow = (unsigned long)
shadow 7057 drivers/infiniband/hw/qib/qib_iba7322.c % BITS_PER_LONG, &shadow))
shadow 287 drivers/infiniband/hw/qib/qib_tx.c unsigned long *shadow = dd->pioavailshadow;
shadow 319 drivers/infiniband/hw/qib/qib_tx.c if (__test_and_set_bit((2 * i) + 1, shadow))
shadow 322 drivers/infiniband/hw/qib/qib_tx.c __change_bit(2 * i, shadow);
shadow 77 drivers/media/dvb-frontends/itd1000.c itd1000_write_regs(state, (reg - 1) & 0xff, &state->shadow[(reg - 1) & 0xff], 1);
shadow 90 drivers/media/dvb-frontends/itd1000.c state->shadow[r] = tmp;
shadow 382 drivers/media/dvb-frontends/itd1000.c memset(state->shadow, 0xff, sizeof(state->shadow));
shadow 384 drivers/media/dvb-frontends/itd1000.c state->shadow[i] = itd1000_read_reg(state, i);
shadow 20 drivers/media/dvb-frontends/itd1000_priv.h u8 shadow[256];
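Note: itd1000 (above) and s5h1420 (next) shadow a whole I2C register file because the chips' registers are not reliably readable; writes go through the cache so later read-modify-write cycles can be served from it. A sketch of the pattern (bus_write() is a hypothetical stand-in for the drivers' i2c_transfer() wrappers):

    #include <stdint.h>

    extern int bus_write(uint8_t reg, uint8_t val);  /* hypothetical I2C write */

    static uint8_t reg_shadow[256];     /* last value written per register */

    static int reg_write(uint8_t reg, uint8_t val)
    {
        int ret = bus_write(reg, val);

        if (ret == 0)
            reg_shadow[reg] = val;      /* keep the cache coherent */
        return ret;
    }

    /* Read-modify-write without being able to read the device. */
    static int reg_update_bits(uint8_t reg, uint8_t mask, uint8_t val)
    {
        return reg_write(reg, (uint8_t)((reg_shadow[reg] & ~mask) | (val & mask)));
    }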
shadow 48 drivers/media/dvb-frontends/s5h1420.c u8 shadow[256];
shadow 76 drivers/media/dvb-frontends/s5h1420.c b[1] = state->shadow[(reg - 1) & 0xff];
shadow 108 drivers/media/dvb-frontends/s5h1420.c state->shadow[reg] = data;
shadow 895 drivers/media/dvb-frontends/s5h1420.c memset(state->shadow, 0xff, sizeof(state->shadow));
shadow 898 drivers/media/dvb-frontends/s5h1420.c state->shadow[i] = s5h1420_readreg(state, i);
shadow 126 drivers/media/i2c/tvaudio.c audiocmd shadow;
shadow 163 drivers/media/i2c/tvaudio.c chip->shadow.bytes[1] = val;
shadow 173 drivers/media/i2c/tvaudio.c if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) {
shadow 182 drivers/media/i2c/tvaudio.c chip->shadow.bytes[subaddr+1] = val;
shadow 204 drivers/media/i2c/tvaudio.c val = (chip->shadow.bytes[1] & ~mask) | (val & mask);
shadow 206 drivers/media/i2c/tvaudio.c if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) {
shadow 213 drivers/media/i2c/tvaudio.c val = (chip->shadow.bytes[subaddr+1] & ~mask) | (val & mask);
shadow 281 drivers/media/i2c/tvaudio.c if (cmd->count + cmd->bytes[0] - 1 >= ARRAY_SIZE(chip->shadow.bytes)) {
shadow 296 drivers/media/i2c/tvaudio.c chip->shadow.bytes[i+cmd->bytes[0]] = cmd->bytes[i];
shadow 441 drivers/media/i2c/tvaudio.c int t = chip->shadow.bytes[TDA9840_SW + 1] & ~0x7e;
shadow 606 drivers/media/i2c/tvaudio.c int c6 = chip->shadow.bytes[TDA985x_C6+1] & 0x3f;
shadow 772 drivers/media/i2c/tvaudio.c int sw_data = chip->shadow.bytes[TDA9873_SW+1] & ~ TDA9873_TR_MASK;
shadow 783 drivers/media/i2c/tvaudio.c TDA9873_SW+1, chip->shadow.bytes[TDA9873_SW+1]);
shadow 975 drivers/media/i2c/tvaudio.c chip->shadow.bytes[MAXREGS-2] = dsr;
shadow 976 drivers/media/i2c/tvaudio.c chip->shadow.bytes[MAXREGS-1] = nsr;
shadow 1011 drivers/media/i2c/tvaudio.c if(chip->shadow.bytes[MAXREGS-2] & 0x20) /* DSR.RSSF=1 */
shadow 1343 drivers/media/i2c/tvaudio.c int s1 = chip->shadow.bytes[TDA8425_S1+1] & 0xe1;
shadow 1986 drivers/media/i2c/tvaudio.c chip->shadow.count = desc->registers+1;
shadow 345 drivers/media/pci/cx88/cx88.h u32 shadow[SHADOW_MAX];
shadow 596 drivers/media/pci/cx88/cx88.h #define cx_sread(sreg) (core->shadow[sreg])
shadow 598 drivers/media/pci/cx88/cx88.h (core->shadow[sreg] = value, \
shadow 599 drivers/media/pci/cx88/cx88.h writel(core->shadow[sreg], core->lmmio + ((reg) >> 2)))
shadow 601 drivers/media/pci/cx88/cx88.h (core->shadow[sreg] = (core->shadow[sreg] & ~(mask)) | \
shadow 603 drivers/media/pci/cx88/cx88.h writel(core->shadow[sreg], \
shadow 672 drivers/media/platform/omap3isp/isppreview.c preview_params_lock(struct isp_prev_device *prev, u32 update, bool shadow)
shadow 676 drivers/media/platform/omap3isp/isppreview.c if (shadow) {
shadow 693 drivers/media/platform/omap3isp/isppreview.c preview_params_unlock(struct isp_prev_device *prev, u32 update, bool shadow)
shadow 697 drivers/media/platform/omap3isp/isppreview.c if (shadow) {
shadow 64 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h struct mlx5e_ktls_offload_context_tx_shadow *shadow;
shadow 66 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
shadow 68 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
shadow 70 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h shadow->priv_tx = priv_tx;
shadow 78 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h struct mlx5e_ktls_offload_context_tx_shadow *shadow;
shadow 80 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
shadow 82 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
shadow 84 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h return shadow->priv_tx;
shadow 126 drivers/net/ethernet/myricom/myri10ge/myri10ge.c struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
shadow 1240 drivers/net/ethernet/myricom/myri10ge/myri10ge.c rx->shadow[idx].addr_low =
shadow 1242 drivers/net/ethernet/myricom/myri10ge/myri10ge.c rx->shadow[idx].addr_high =
shadow 1259 drivers/net/ethernet/myricom/myri10ge/myri10ge.c &rx->shadow[idx - 7]);
shadow 1980 drivers/net/ethernet/myricom/myri10ge/myri10ge.c bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
shadow 1981 drivers/net/ethernet/myricom/myri10ge/myri10ge.c ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
shadow 1982 drivers/net/ethernet/myricom/myri10ge/myri10ge.c if (ss->rx_small.shadow == NULL)
shadow 1985 drivers/net/ethernet/myricom/myri10ge/myri10ge.c bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
shadow 1986 drivers/net/ethernet/myricom/myri10ge/myri10ge.c ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
shadow 1987 drivers/net/ethernet/myricom/myri10ge/myri10ge.c if (ss->rx_big.shadow == NULL)
shadow 2065 drivers/net/ethernet/myricom/myri10ge/myri10ge.c kfree(ss->rx_big.shadow);
shadow 2068 drivers/net/ethernet/myricom/myri10ge/myri10ge.c kfree(ss->rx_small.shadow);
shadow 2142 drivers/net/ethernet/myricom/myri10ge/myri10ge.c kfree(ss->rx_big.shadow);
shadow 2144 drivers/net/ethernet/myricom/myri10ge/myri10ge.c kfree(ss->rx_small.shadow);
shadow 1492 drivers/net/ethernet/qlogic/qed/qed_vf.c struct qed_bulletin_content shadow;
shadow 1499 drivers/net/ethernet/qlogic/qed/qed_vf.c memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
shadow 1502 drivers/net/ethernet/qlogic/qed/qed_vf.c if (shadow.version == p_iov->bulletin_shadow.version)
shadow 1506 drivers/net/ethernet/qlogic/qed/qed_vf.c crc = crc32(0, (u8 *)&shadow + crc_size,
shadow 1508 drivers/net/ethernet/qlogic/qed/qed_vf.c if (crc != shadow.crc)
shadow 1512 drivers/net/ethernet/qlogic/qed/qed_vf.c memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
shadow 1515 drivers/net/ethernet/qlogic/qed/qed_vf.c "Read a bulletin update %08x\n", shadow.version);
shadow 141 drivers/net/phy/bcm-phy-lib.c int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow)
shadow 143 drivers/net/phy/bcm-phy-lib.c phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
shadow 148 drivers/net/phy/bcm-phy-lib.c int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
shadow 153 drivers/net/phy/bcm-phy-lib.c MII_BCM54XX_SHD_VAL(shadow) |
shadow 344 drivers/net/phy/bcm-phy-lib.c static u64 bcm_phy_get_stat(struct phy_device *phydev, u64 *shadow,
shadow 357 drivers/net/phy/bcm-phy-lib.c shadow[i] += val;
shadow 358 drivers/net/phy/bcm-phy-lib.c ret = shadow[i];
shadow 364 drivers/net/phy/bcm-phy-lib.c void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
shadow 370 drivers/net/phy/bcm-phy-lib.c data[i] = bcm_phy_get_stat(phydev, shadow, i);
shadow 47 drivers/net/phy/bcm-phy-lib.h int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
shadow 49 drivers/net/phy/bcm-phy-lib.h int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow);
shadow 64 drivers/net/phy/bcm-phy-lib.h void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
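Note: the bcm-phy-lib.c fragments above show a different kind of shadow: BCM54xx PHYs multiplex many "shadow registers" behind the single MII register 0x1c, selected by the value written to it. A sketch reconstructed from those fragments; the constants follow include/linux/brcmphy.h, and the 0x8000 write strobe is an assumption here:

    #define MII_BCM54XX_SHD         0x1c                  /* shadow window reg */
    #define MII_BCM54XX_SHD_WRITE   0x8000                /* assumed write bit */
    #define MII_BCM54XX_SHD_VAL(x)  (((x) & 0x1f) << 10)  /* shadow selector */
    #define MII_BCM54XX_SHD_DATA(x) ((x) & 0x3ff)         /* 10-bit payload */

    extern int mdio_read(int reg);            /* stand-ins for phy_read()/ */
    extern int mdio_write(int reg, int val);  /* phy_write()               */

    static int shd_read(int shadow)
    {
        /* Select the shadow register, then read its data field back. */
        mdio_write(MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
        return MII_BCM54XX_SHD_DATA(mdio_read(MII_BCM54XX_SHD));
    }

    static int shd_write(int shadow, int val)
    {
        return mdio_write(MII_BCM54XX_SHD,
                          MII_BCM54XX_SHD_WRITE |
                          MII_BCM54XX_SHD_VAL(shadow) |
                          MII_BCM54XX_SHD_DATA(val));
    }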
shadow 21 drivers/pinctrl/sh-pfc/gpio.c u32 shadow;
shadow 114 drivers/pinctrl/sh-pfc/gpio.c chip->regs[i].shadow = gpio_read_data_reg(chip, dreg);
shadow 159 drivers/pinctrl/sh-pfc/gpio.c reg->shadow |= BIT(pos);
shadow 161 drivers/pinctrl/sh-pfc/gpio.c reg->shadow &= ~BIT(pos);
shadow 163 drivers/pinctrl/sh-pfc/gpio.c gpio_write_data_reg(chip, reg->info, reg->shadow);
shadow 128 drivers/ps3/ps3-lpm.c struct ps3_lpm_shadow_regs shadow;
shadow 394 drivers/ps3/ps3-lpm.c return lpm_priv->shadow.pm_control;
shadow 398 drivers/ps3/ps3-lpm.c return lpm_priv->shadow.pm_start_stop;
shadow 409 drivers/ps3/ps3-lpm.c return lpm_priv->shadow.group_control;
shadow 411 drivers/ps3/ps3-lpm.c return lpm_priv->shadow.debug_bus_control;
shadow 446 drivers/ps3/ps3-lpm.c if (val != lpm_priv->shadow.group_control)
shadow 451 drivers/ps3/ps3-lpm.c lpm_priv->shadow.group_control = val;
shadow 454 drivers/ps3/ps3-lpm.c if (val != lpm_priv->shadow.debug_bus_control)
shadow 459 drivers/ps3/ps3-lpm.c lpm_priv->shadow.debug_bus_control = val;
shadow 465 drivers/ps3/ps3-lpm.c if (val != lpm_priv->shadow.pm_control)
shadow 471 drivers/ps3/ps3-lpm.c lpm_priv->shadow.pm_control = val;
shadow 478 drivers/ps3/ps3-lpm.c if (val != lpm_priv->shadow.pm_start_stop)
shadow 483 drivers/ps3/ps3-lpm.c lpm_priv->shadow.pm_start_stop = val;
shadow 847 drivers/ps3/ps3-lpm.c if (!(lpm_priv->shadow.pm_start_stop &
shadow 1135 drivers/ps3/ps3-lpm.c lpm_priv->shadow.pm_control = PS3_LPM_SHADOW_REG_INIT;
shadow 1136 drivers/ps3/ps3-lpm.c lpm_priv->shadow.pm_start_stop = PS3_LPM_SHADOW_REG_INIT;
shadow 1137 drivers/ps3/ps3-lpm.c lpm_priv->shadow.group_control = PS3_LPM_SHADOW_REG_INIT;
shadow 1138 drivers/ps3/ps3-lpm.c lpm_priv->shadow.debug_bus_control = PS3_LPM_SHADOW_REG_INIT;
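Note: ps3-lpm initialises each shadow to a PS3_LPM_SHADOW_REG_INIT sentinel and then writes the hypervisor register only when the new value differs, eliding redundant hypercalls. A sketch with a made-up sentinel value and hv_set_reg() standing in for the lv1 call:

    #include <stdint.h>

    #define SHADOW_REG_INIT 0xFFFFFFFF00000000ULL  /* assumed 'never written' */

    extern int hv_set_reg(int reg, uint64_t val);  /* hypothetical hypercall */

    static uint64_t shadow_pm_control = SHADOW_REG_INIT;

    static int set_pm_control(uint64_t val)
    {
        int ret;

        /* Only pay for the hypercall when the value actually changes. */
        if (val == shadow_pm_control)
            return 0;

        ret = hv_set_reg(0 /* pm_control */, val);
        if (ret == 0)
            shadow_pm_control = val;
        return ret;
    }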
shadow 118 drivers/scsi/xen-scsifront.c struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS];
shadow 160 drivers/scsi/xen-scsifront.c info->shadow[id] = NULL;
shadow 179 drivers/scsi/xen-scsifront.c struct vscsifrnt_shadow *shadow)
shadow 183 drivers/scsi/xen-scsifront.c struct scsi_cmnd *sc = shadow->sc;
shadow 194 drivers/scsi/xen-scsifront.c info->shadow[id] = shadow;
shadow 195 drivers/scsi/xen-scsifront.c shadow->rqid = id;
shadow 201 drivers/scsi/xen-scsifront.c ring_req->act = shadow->act;
shadow 202 drivers/scsi/xen-scsifront.c ring_req->ref_rqid = shadow->ref_rqid;
shadow 203 drivers/scsi/xen-scsifront.c ring_req->nr_segments = shadow->nr_segments;
shadow 217 drivers/scsi/xen-scsifront.c for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
shadow 218 drivers/scsi/xen-scsifront.c ring_req->seg[i] = shadow->seg[i];
shadow 228 drivers/scsi/xen-scsifront.c struct vscsifrnt_shadow *shadow)
shadow 232 drivers/scsi/xen-scsifront.c if (shadow->sc->sc_data_direction == DMA_NONE)
shadow 235 drivers/scsi/xen-scsifront.c for (i = 0; i < shadow->nr_grants; i++) {
shadow 236 drivers/scsi/xen-scsifront.c if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
shadow 241 drivers/scsi/xen-scsifront.c gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
shadow 244 drivers/scsi/xen-scsifront.c kfree(shadow->sg);
shadow 250 drivers/scsi/xen-scsifront.c struct vscsifrnt_shadow *shadow;
shadow 256 drivers/scsi/xen-scsifront.c shadow = info->shadow[id];
shadow 257 drivers/scsi/xen-scsifront.c sc = shadow->sc;
shadow 261 drivers/scsi/xen-scsifront.c scsifront_gnttab_done(info, shadow);
shadow 281 drivers/scsi/xen-scsifront.c struct vscsifrnt_shadow *shadow = info->shadow[id];
shadow 285 drivers/scsi/xen-scsifront.c shadow->wait_reset = 1;
shadow 286 drivers/scsi/xen-scsifront.c switch (shadow->rslt_reset) {
shadow 288 drivers/scsi/xen-scsifront.c shadow->rslt_reset = ring_rsp->rslt;
shadow 293 drivers/scsi/xen-scsifront.c kfree(shadow);
shadow 300 drivers/scsi/xen-scsifront.c shadow->rslt_reset, id);
shadow 305 drivers/scsi/xen-scsifront.c wake_up(&shadow->wq_reset);
shadow 316 drivers/scsi/xen-scsifront.c if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
shadow 394 drivers/scsi/xen-scsifront.c struct vscsifrnt_shadow *shadow)
shadow 419 drivers/scsi/xen-scsifront.c shadow->sg = kcalloc(data_grants,
shadow 421 drivers/scsi/xen-scsifront.c if (!shadow->sg)
shadow 424 drivers/scsi/xen-scsifront.c seg = shadow->sg ? : shadow->seg;
shadow 429 drivers/scsi/xen-scsifront.c kfree(shadow->sg);
shadow 448 drivers/scsi/xen-scsifront.c shadow->gref[ref_cnt] = ref;
shadow 449 drivers/scsi/xen-scsifront.c shadow->seg[ref_cnt].gref = ref;
shadow 450 drivers/scsi/xen-scsifront.c shadow->seg[ref_cnt].offset = (uint16_t)off;
shadow 451 drivers/scsi/xen-scsifront.c shadow->seg[ref_cnt].length = (uint16_t)bytes;
shadow 484 drivers/scsi/xen-scsifront.c shadow->gref[ref_cnt] = ref;
shadow 499 drivers/scsi/xen-scsifront.c shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
shadow 501 drivers/scsi/xen-scsifront.c shadow->nr_segments = (uint8_t)ref_cnt;
shadow 502 drivers/scsi/xen-scsifront.c shadow->nr_grants = ref_cnt;
shadow 532 drivers/scsi/xen-scsifront.c struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
shadow 538 drivers/scsi/xen-scsifront.c shadow->sc = sc;
shadow 539 drivers/scsi/xen-scsifront.c shadow->act = VSCSIIF_ACT_SCSI_CDB;
shadow 547 drivers/scsi/xen-scsifront.c err = map_data_for_request(info, sc, shadow);
shadow 559 drivers/scsi/xen-scsifront.c if (scsifront_do_request(info, shadow)) {
shadow 560 drivers/scsi/xen-scsifront.c scsifront_gnttab_done(info, shadow);
shadow 585 drivers/scsi/xen-scsifront.c struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
shadow 588 drivers/scsi/xen-scsifront.c shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
shadow 589 drivers/scsi/xen-scsifront.c if (!shadow)
shadow 592 drivers/scsi/xen-scsifront.c shadow->act = act;
shadow 593 drivers/scsi/xen-scsifront.c shadow->rslt_reset = RSLT_RESET_WAITING;
shadow 594 drivers/scsi/xen-scsifront.c shadow->sc = sc;
shadow 595 drivers/scsi/xen-scsifront.c shadow->ref_rqid = s->rqid;
shadow 596 drivers/scsi/xen-scsifront.c init_waitqueue_head(&shadow->wq_reset);
shadow 604 drivers/scsi/xen-scsifront.c if (!scsifront_do_request(info, shadow))
shadow 618 drivers/scsi/xen-scsifront.c err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
shadow 622 drivers/scsi/xen-scsifront.c err = shadow->rslt_reset;
shadow 623 drivers/scsi/xen-scsifront.c scsifront_put_rqid(info, shadow->rqid);
shadow 624 drivers/scsi/xen-scsifront.c kfree(shadow);
shadow 627 drivers/scsi/xen-scsifront.c shadow->rslt_reset = RSLT_RESET_ERR;
shadow 638 drivers/scsi/xen-scsifront.c kfree(shadow);
shadow 69 drivers/video/fbdev/udlfb.c static bool shadow = 1; /* Optionally disable shadow framebuffer */
shadow 1254 drivers/video/fbdev/udlfb.c if (shadow)
shadow 1669 drivers/video/fbdev/udlfb.c dev_dbg(&intf->dev, "shadow enable=%d\n", shadow);
shadow 1984 drivers/video/fbdev/udlfb.c module_param(shadow, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
shadow 1985 drivers/video/fbdev/udlfb.c MODULE_PARM_DESC(shadow, "Shadow vid mem. Disable to save mem but lose perf");
shadow 2479 fs/io_uring.c struct sqe_submit *s, struct io_kiocb *shadow)
shadow 2484 fs/io_uring.c if (!shadow)
shadow 2497 fs/io_uring.c __io_free_req(shadow);
shadow 2511 fs/io_uring.c list_add_tail(&shadow->list, &ctx->defer_list);
shadow 32 fs/nilfs2/dat.c struct nilfs_shadow_map shadow;
shadow 500 fs/nilfs2/dat.c nilfs_mdt_setup_shadow_map(dat, &di->shadow);
shadow 505 fs/nilfs2/mdt.c struct nilfs_shadow_map *shadow)
shadow 509 fs/nilfs2/mdt.c INIT_LIST_HEAD(&shadow->frozen_buffers);
shadow 510 fs/nilfs2/mdt.c address_space_init_once(&shadow->frozen_data);
shadow 511 fs/nilfs2/mdt.c nilfs_mapping_init(&shadow->frozen_data, inode);
shadow 512 fs/nilfs2/mdt.c address_space_init_once(&shadow->frozen_btnodes);
shadow 513 fs/nilfs2/mdt.c nilfs_mapping_init(&shadow->frozen_btnodes, inode);
shadow 514 fs/nilfs2/mdt.c mi->mi_shadow = shadow;
shadow 526 fs/nilfs2/mdt.c struct nilfs_shadow_map *shadow = mi->mi_shadow;
shadow 529 fs/nilfs2/mdt.c ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
shadow 533 fs/nilfs2/mdt.c ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
shadow 538 fs/nilfs2/mdt.c nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store);
shadow 545 fs/nilfs2/mdt.c struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
shadow 550 fs/nilfs2/mdt.c page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
shadow 563 fs/nilfs2/mdt.c &shadow->frozen_buffers);
shadow 577 fs/nilfs2/mdt.c struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
shadow 582 fs/nilfs2/mdt.c page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
shadow 594 fs/nilfs2/mdt.c static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow)
shadow 596 fs/nilfs2/mdt.c struct list_head *head = &shadow->frozen_buffers;
shadow 615 fs/nilfs2/mdt.c struct nilfs_shadow_map *shadow = mi->mi_shadow;
shadow 623 fs/nilfs2/mdt.c nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
shadow 626 fs/nilfs2/mdt.c nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
shadow 628 fs/nilfs2/mdt.c nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
shadow 640 fs/nilfs2/mdt.c struct nilfs_shadow_map *shadow = mi->mi_shadow;
shadow 643 fs/nilfs2/mdt.c nilfs_release_frozen_buffers(shadow);
shadow 644 fs/nilfs2/mdt.c truncate_inode_pages(&shadow->frozen_data, 0);
shadow 645 fs/nilfs2/mdt.c truncate_inode_pages(&shadow->frozen_btnodes, 0);
shadow 87 fs/nilfs2/mdt.h struct nilfs_shadow_map *shadow);
shadow 317 fs/xfs/xfs_log_cil.c struct xfs_log_vec *shadow;
shadow 328 fs/xfs/xfs_log_cil.c shadow = lip->li_lv_shadow;
shadow 329 fs/xfs/xfs_log_cil.c if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
shadow 333 fs/xfs/xfs_log_cil.c if (!shadow->lv_niovecs && !ordered)
shadow 338 fs/xfs/xfs_log_cil.c if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
shadow 354 fs/xfs/xfs_log_cil.c lv->lv_niovecs = shadow->lv_niovecs;
shadow 363 fs/xfs/xfs_log_cil.c lv = shadow;
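Note: in fs/xfs/xfs_log_cil.c the shadow is a log vector preallocated per log item outside the commit lock, so formatting under the lock never has to allocate: if the item's current buffer is too small, the shadow is swapped in. A much-simplified sketch of that two-phase scheme (names, sizing and the eager free are illustrative, not XFS's):

    #include <stdlib.h>

    struct log_item {
        char *lv;           /* buffer currently committed to the log */
        char *lv_shadow;    /* spare, allocated ahead of time */
        size_t lv_size, shadow_size;
    };

    /* Phase 1, no locks held: make sure the shadow is big enough. */
    static int item_reserve(struct log_item *lip, size_t need)
    {
        char *buf;

        if (lip->lv_shadow && lip->shadow_size >= need)
            return 0;
        buf = realloc(lip->lv_shadow, need);
        if (!buf)
            return -1;
        lip->lv_shadow = buf;
        lip->shadow_size = need;
        return 0;
    }

    /* Phase 2, under the commit lock: never allocates. */
    static char *item_format_buf(struct log_item *lip, size_t need)
    {
        if (lip->lv && lip->lv_size >= need)
            return lip->lv;             /* old buffer still fits */
        free(lip->lv);                  /* the real code defers this */
        lip->lv = lip->lv_shadow;       /* swap the shadow in */
        lip->lv_size = lip->shadow_size;
        lip->lv_shadow = NULL;
        lip->shadow_size = 0;
        return lip->lv;
    }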
shadow 332 include/linux/jbd2.h BUFFER_FNS(Shadow, shadow)
shadow 612 include/linux/pagemap.h extern void __delete_from_page_cache(struct page *page, void *shadow);
shadow 13 include/linux/scx200_gpio.h #define __SCx200_GPIO_SHADOW unsigned long *shadow = scx200_gpio_shadow+bank
shadow 16 include/linux/scx200_gpio.h #define __SCx200_GPIO_OUT __asm__ __volatile__("outsl":"=mS" (shadow):"d" (ioaddr), "0" (shadow))
shadow 46 include/linux/scx200_gpio.h set_bit(index, shadow); /* __set_bit()? */
shadow 57 include/linux/scx200_gpio.h clear_bit(index, shadow); /* __clear_bit()? */
shadow 69 include/linux/scx200_gpio.h set_bit(index, shadow);
shadow 71 include/linux/scx200_gpio.h clear_bit(index, shadow);
shadow 81 include/linux/scx200_gpio.h change_bit(index, shadow);
shadow 311 include/linux/swap.h void workingset_refault(struct page *page, void *shadow);
shadow 70 kernel/livepatch/shadow.c static inline bool klp_shadow_match(struct klp_shadow *shadow, void *obj,
shadow 73 kernel/livepatch/shadow.c return shadow->obj == obj && shadow->id == id;
shadow 85 kernel/livepatch/shadow.c struct klp_shadow *shadow;
shadow 89 kernel/livepatch/shadow.c hash_for_each_possible_rcu(klp_shadow_hash, shadow, node,
shadow 92 kernel/livepatch/shadow.c if (klp_shadow_match(shadow, obj, id)) {
shadow 94 kernel/livepatch/shadow.c return shadow->data;
shadow 234 kernel/livepatch/shadow.c static void klp_shadow_free_struct(struct klp_shadow *shadow,
shadow 237 kernel/livepatch/shadow.c hash_del_rcu(&shadow->node);
shadow 239 kernel/livepatch/shadow.c dtor(shadow->obj, shadow->data);
shadow 240 kernel/livepatch/shadow.c kfree_rcu(shadow, rcu_head);
shadow 255 kernel/livepatch/shadow.c struct klp_shadow *shadow;
shadow 261 kernel/livepatch/shadow.c hash_for_each_possible(klp_shadow_hash, shadow, node,
shadow 264 kernel/livepatch/shadow.c if (klp_shadow_match(shadow, obj, id)) {
shadow 265 kernel/livepatch/shadow.c klp_shadow_free_struct(shadow, dtor);
shadow 285 kernel/livepatch/shadow.c struct klp_shadow *shadow;
shadow 292 kernel/livepatch/shadow.c hash_for_each(klp_shadow_hash, i, shadow, node) {
shadow 293 kernel/livepatch/shadow.c if (klp_shadow_match(shadow, shadow->obj, id))
shadow 294 kernel/livepatch/shadow.c klp_shadow_free_struct(shadow, dtor);
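Note: kernel/livepatch/shadow.c above implements livepatch "shadow variables", a hash keyed by the pair <object, id> that lets a live patch bolt new fields onto structures it cannot resize. A sketch of a patched function using the real API from <linux/livepatch.h>; the id, struct and field here are hypothetical:

    #include <linux/livepatch.h>
    #include <linux/slab.h>

    #define SV_COUNTER_ID 1                 /* patch-chosen shadow id */

    struct sv_counter { int count; };       /* the bolted-on 'new field' */

    static int patched_use(void *obj)
    {
        struct sv_counter *c;

        /* Look up, or allocate and hash, the <obj, id> -> data pair. */
        c = klp_shadow_get_or_alloc(obj, SV_COUNTER_ID, sizeof(*c),
                                    GFP_KERNEL, NULL, NULL);
        if (!c)
            return -ENOMEM;
        c->count++;
        return 0;
    }

    static void patched_release(void *obj)
    {
        klp_shadow_free(obj, SV_COUNTER_ID, NULL);  /* no dtor needed */
    }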
shadow 120 mm/filemap.c struct page *page, void *shadow)
shadow 135 mm/filemap.c VM_BUG_ON_PAGE(nr != 1 && shadow, page);
shadow 137 mm/filemap.c xas_store(&xas, shadow);
shadow 143 mm/filemap.c if (shadow) {
shadow 231 mm/filemap.c void __delete_from_page_cache(struct page *page, void *shadow)
shadow 238 mm/filemap.c page_cache_delete(mapping, page, shadow);
shadow 939 mm/filemap.c void *shadow = NULL;
shadow 944 mm/filemap.c gfp_mask, &shadow);
shadow 957 mm/filemap.c if (!(gfp_mask & __GFP_WRITE) && shadow)
shadow 958 mm/filemap.c workingset_refault(page, shadow);
shadow 163 mm/kasan/common.c u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
shadow 166 mm/kasan/common.c *shadow = tag;
shadow 168 mm/kasan/common.c *shadow = size & KASAN_SHADOW_MASK;
shadow 402 mm/kasan/report.c static int shadow_pointer_offset(const void *row, const void *shadow)
shadow 407 mm/kasan/report.c return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
shadow 408 mm/kasan/report.c (shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
shadow 414 mm/kasan/report.c const void *shadow = kasan_mem_to_shadow(addr);
shadow 417 mm/kasan/report.c shadow_row = (void *)round_down((unsigned long)shadow,
shadow 440 mm/kasan/report.c if (row_is_guilty(shadow_row, shadow))
shadow 442 mm/kasan/report.c shadow_pointer_offset(shadow_row, shadow),
shadow 82 mm/kasan/tags.c u8 *shadow_first, *shadow_last, *shadow;
shadow 118 mm/kasan/tags.c for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
shadow 119 mm/kasan/tags.c if (*shadow != tag) {
shadow 79 mm/kasan/tags_report.c u8 *shadow = (u8 *)kasan_mem_to_shadow(addr);
shadow 81 mm/kasan/tags_report.c pr_err("Pointer tag: [%02x], memory tag: [%02x]\n", addr_tag, *shadow);
shadow 953 mm/vmscan.c void *shadow = NULL;
shadow 974 mm/vmscan.c shadow = workingset_eviction(page);
shadow 975 mm/vmscan.c __delete_from_page_cache(page, shadow);
shadow 196 mm/workingset.c static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
shadow 199 mm/workingset.c unsigned long entry = xa_to_value(shadow);
shadow 249 mm/workingset.c void workingset_refault(struct page *page, void *shadow)
shadow 261 mm/workingset.c unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
shadow 99 scripts/kconfig/lxdialog/dialog.h struct dialog_color shadow;
shadow 21 scripts/kconfig/lxdialog/util.c dlg.shadow.atr = A_NORMAL;
shadow 61 scripts/kconfig/lxdialog/util.c DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, true);
shadow 94 scripts/kconfig/lxdialog/util.c DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, false);
shadow 181 scripts/kconfig/lxdialog/util.c init_one_color(&dlg.shadow);
shadow 503 scripts/kconfig/lxdialog/util.c wattrset(win, dlg.shadow.atr);
shadow 310 tools/arch/x86/include/uapi/asm/kvm.h __u8 shadow;
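Note: the mm/kasan entries above use shadow in the sanitizer sense: one shadow byte describes the validity of each 8-byte granule of real memory, located by a shift-and-offset mapping. A standalone sketch of the address arithmetic (the offset constant matches generic KASAN on x86-64; treat it as illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define KASAN_SHADOW_SCALE_SHIFT 3               /* 8 bytes per shadow byte */
    #define KASAN_SHADOW_OFFSET 0xdffffc0000000000UL /* arch-specific */

    static uintptr_t mem_to_shadow(uintptr_t addr)
    {
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
    }

    int main(void)
    {
        uintptr_t p = 0xffff888000001000UL;

        /* Shadow byte 0 = all 8 bytes valid, 1..7 = that many leading
         * bytes valid, negative codes mark redzones and freed memory. */
        printf("shadow of %#lx is %#lx\n",
               (unsigned long)p, (unsigned long)mem_to_shadow(p));
        return 0;
    }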