writable 58 arch/arm/include/asm/kvm_mmu.h phys_addr_t pa, unsigned long size, bool writable);
writable 38 arch/arm64/include/asm/hugetlb.h struct page *page, int writable);
writable 159 arch/arm64/include/asm/kvm_mmu.h phys_addr_t pa, unsigned long size, bool writable);
writable 298 arch/arm64/mm/hugetlbpage.c struct page *page, int writable)
writable 239 arch/powerpc/include/asm/kvm_book3s.h bool writing, bool *writable);
writable 427 arch/powerpc/kvm/book3s.c bool *writable)
writable 443 arch/powerpc/kvm/book3s.c if (writable)
writable 444 arch/powerpc/kvm/book3s.c *writable = true;
writable 448 arch/powerpc/kvm/book3s.c return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
writable 145 arch/powerpc/kvm/book3s_32_mmu_host.c bool writable;
writable 148 arch/powerpc/kvm/book3s_32_mmu_host.c hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
writable 198 arch/powerpc/kvm/book3s_32_mmu_host.c if (orig_pte->may_write && writable) {
writable 84 arch/powerpc/kvm/book3s_64_mmu_host.c bool writable;
writable 96 arch/powerpc/kvm/book3s_64_mmu_host.c pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
writable 124 arch/powerpc/kvm/book3s_64_mmu_host.c if (!orig_pte->may_write || !writable)
writable 3613 arch/s390/kvm/kvm-s390.c long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
writable 3616 arch/s390/kvm/kvm-s390.c writable ? FAULT_FLAG_WRITE : 0);
writable 286 arch/s390/kvm/kvm-s390.h long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
writable 1440 arch/s390/kvm/priv.c bool writable;
writable 1475 arch/s390/kvm/priv.c hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
writable 1479 arch/s390/kvm/priv.c if (!writable)
writable 381 arch/sparc/include/asm/pgtable_64.h struct page *page, int writable);
writable 3643 arch/x86/kvm/mmu.c bool *writable);
writable 4197 arch/x86/kvm/mmu.c bool *writable)
writable 4212 arch/x86/kvm/mmu.c *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
writable 4226 arch/x86/kvm/mmu.c *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
writable 231 drivers/hwtracing/intel_th/gth.c .writable = (_w) }
writable 240 drivers/hwtracing/intel_th/gth.c writable : 1;
writable 444 drivers/hwtracing/intel_th/gth.c if (output_parms[j].writable) {
writable 57 drivers/infiniband/core/umem.c put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
writable 240 drivers/infiniband/core/umem.c umem->writable = ib_access_writable(access);
writable 271 drivers/infiniband/core/umem.c if (!umem->writable)
writable 316 drivers/infiniband/core/umem_odp.c umem->writable = ib_access_writable(access);
writable 360 drivers/infiniband/core/umem_odp.c umem->writable = root->umem.writable;
writable 412 drivers/infiniband/core/umem_odp.c umem_odp->umem.writable = ib_access_writable(access);
writable 2019 drivers/infiniband/hw/hfi1/hfi.h size_t npages, bool writable, struct page **pages);
writable 104 drivers/infiniband/hw/hfi1/user_pages.c bool writable, struct page **pages)
writable 107 drivers/infiniband/hw/hfi1/user_pages.c unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
writable 489 drivers/infiniband/hw/mlx4/mr.c !mmr->umem->writable) {
writable 2169 drivers/infiniband/hw/mlx5/devx.c (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
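
The KVM entries above (Book3S, s390, x86) all follow one convention: the page-frame lookup takes an optional bool *writable out-parameter that the callee fills in to report whether the resulting mapping may be written, and a NULL pointer means the caller does not care. Below is a minimal standalone sketch of that convention; the names (lookup_pfn, the fake read-only rule) are hypothetical stand-ins, not the kernel's own helpers.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a guest-frame lookup such as gfn_to_pfn_prot():
 * the optional out-parameter reports whether the mapping is writable. */
static unsigned long lookup_pfn(unsigned long gfn, bool write_fault, bool *writable)
{
	bool slot_readonly = (gfn & 1);		/* pretend odd frames sit in a read-only slot */

	if (writable)
		*writable = !slot_readonly;	/* report writability only when asked */
	if (write_fault && slot_readonly)
		return 0;			/* write fault on a read-only slot fails */
	return 0x1000 + gfn;			/* fake frame number */
}

int main(void)
{
	bool writable;
	unsigned long pfn = lookup_pfn(2, false, &writable);

	printf("pfn=%#lx writable=%d\n", pfn, writable);
	lookup_pfn(2, false, NULL);		/* NULL: caller does not care */
	return 0;
}
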
writable 642 drivers/infiniband/hw/mlx5/odp.c if (prefetch && !downgrade && !odp->umem.writable) {
writable 650 drivers/infiniband/hw/mlx5/odp.c if (odp->umem.writable && !downgrade)
writable 85 drivers/infiniband/hw/usnic/usnic_uiom.c static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
writable 137 drivers/infiniband/hw/usnic/usnic_uiom.c flags |= (writable) ? IOMMU_WRITE : 0;
writable 139 drivers/infiniband/hw/usnic/usnic_uiom.c gup_flags |= (writable) ? 0 : FOLL_FORCE;
writable 221 drivers/infiniband/hw/usnic/usnic_uiom.c int writable = 0;
writable 235 drivers/infiniband/hw/usnic/usnic_uiom.c writable = 1;
writable 240 drivers/infiniband/hw/usnic/usnic_uiom.c usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
writable 259 drivers/infiniband/hw/usnic/usnic_uiom.c flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
writable 336 drivers/infiniband/hw/usnic/usnic_uiom.c int writable, int dmasync)
writable 351 drivers/infiniband/hw/usnic/usnic_uiom.c writable = 1;
writable 366 drivers/infiniband/hw/usnic/usnic_uiom.c uiomr->writable = writable;
writable 369 drivers/infiniband/hw/usnic/usnic_uiom.c err = usnic_uiom_get_pages(addr, size, writable, dmasync,
writable 379 drivers/infiniband/hw/usnic/usnic_uiom.c (writable) ? IOMMU_WRITE : 0,
writable 398 drivers/infiniband/hw/usnic/usnic_uiom.c (writable) ? IOMMU_WRITE : 0);
writable 71 drivers/infiniband/hw/usnic/usnic_uiom.h int writable;
writable 135 drivers/infiniband/sw/siw/siw.h bool writable;
writable 78 drivers/infiniband/sw/siw/siw_mem.c umem->writable && dirty);
writable 368 drivers/infiniband/sw/siw/siw_mem.c struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
writable 393 drivers/infiniband/sw/siw/siw_mem.c umem->writable = writable;
writable 397 drivers/infiniband/sw/siw/siw_mem.c if (!writable)
writable 9 drivers/infiniband/sw/siw/siw_mem.h struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
writable 19 drivers/mfd/wm8350-regmap.c u16 writable; /* Mask of writable bits */
writable 298 drivers/mfd/wm8350-regmap.c return wm8350_reg_io_map[reg].writable;
writable 568 drivers/misc/eeprom/at24.c bool writable;
writable 669 drivers/misc/eeprom/at24.c writable = !(flags & AT24_FLAG_READONLY);
writable 670 drivers/misc/eeprom/at24.c if (writable) {
writable 686 drivers/misc/eeprom/at24.c nvmem_config.read_only = !writable;
writable 721 drivers/misc/eeprom/at24.c writable ? "writable" : "read-only", at24->write_max);
writable 613 drivers/power/supply/smb347-charger.c static int smb347_set_writable(struct smb347_charger *smb, bool writable)
writable 616 drivers/power/supply/smb347-charger.c writable ? CMD_A_ALLOW_WRITE : 0);
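
The hfi1 and usnic entries above reduce the same writable flag to bits in two different flag words: FOLL_WRITE (or FOLL_FORCE for read-only pins) when pinning user pages, and IOMMU_WRITE when mapping them through the IOMMU. Below is a standalone sketch of that translation; the flag values here are illustrative stand-ins, not the kernel's definitions from include/linux/mm.h or include/linux/iommu.h.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only; the real FOLL_ and IOMMU_ constants differ. */
#define FOLL_WRITE	0x01
#define FOLL_FORCE	0x10
#define FOLL_LONGTERM	0x10000
#define IOMMU_READ	0x1
#define IOMMU_WRITE	0x2

int main(void)
{
	bool writable = true;

	/* hfi1 style (user_pages.c:107): request write access from GUP only
	 * when the caller marked the region writable. */
	unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);

	/* usnic style (usnic_uiom.c:139): read-only pins add FOLL_FORCE instead. */
	gup_flags |= writable ? 0 : FOLL_FORCE;

	/* IOMMU side (usnic_uiom.c:137): writable regions also get IOMMU_WRITE. */
	int iommu_flags = IOMMU_READ | (writable ? IOMMU_WRITE : 0);

	printf("gup=%#x iommu=%#x\n", gup_flags, iommu_flags);
	return 0;
}
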
writable 97 fs/nfsd/nfs4proc.c u32 *bmval, u32 *writable)
writable 109 fs/nfsd/nfs4proc.c if (writable && !bmval_is_subset(bmval, writable))
writable 111 fs/nfsd/nfs4proc.c if (writable && (bmval[2] & FATTR4_WORD2_MODE_UMASK) &&
writable 450 include/linux/hugetlb.h struct page *page, int writable)
writable 707 include/linux/kvm_host.h unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
writable 710 include/linux/kvm_host.h bool *writable);
writable 718 include/linux/kvm_host.h bool *writable);
writable 723 include/linux/kvm_host.h bool *writable);
writable 769 include/linux/kvm_host.h unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
writable 95 include/linux/mm_types_task.h bool writable;
writable 49 include/rdma/ib_umem.h u32 writable : 1;
writable 51 include/trace/events/huge_memory.h TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
writable 54 include/trace/events/huge_memory.h TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped),
writable 59 include/trace/events/huge_memory.h __field(bool, writable)
writable 69 include/trace/events/huge_memory.h __entry->writable = writable;
writable 79 include/trace/events/huge_memory.h __entry->writable,
writable 113 include/trace/events/huge_memory.h int referenced, bool writable, int status),
writable 115 include/trace/events/huge_memory.h TP_ARGS(page, none_or_zero, referenced, writable, status),
writable 121 include/trace/events/huge_memory.h __field(bool, writable)
writable 129 include/trace/events/huge_memory.h __entry->writable = writable;
writable 137 include/trace/events/huge_memory.h __entry->writable,
writable 3365 mm/hugetlb.c int writable)
writable 3369 mm/hugetlb.c if (writable) {
writable 3378 mm/hugetlb.c entry = arch_make_huge_pte(entry, vma, page, writable);
writable 539 mm/khugepaged.c bool writable = false;
writable 594 mm/khugepaged.c writable = true;
writable 628 mm/khugepaged.c if (likely(writable)) {
writable 632 mm/khugepaged.c referenced, writable, result);
writable 642 mm/khugepaged.c referenced, writable, result);
writable 1127 mm/khugepaged.c bool writable = false;
writable 1164 mm/khugepaged.c writable = true;
writable 1217 mm/khugepaged.c if (writable) {
writable 1235 mm/khugepaged.c trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
writable 591 mm/rmap.c tlb_ubc->writable = false;
writable 599 mm/rmap.c if (tlb_ubc->writable)
writable 603 mm/rmap.c static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
writable 622 mm/rmap.c if (writable)
writable 623 mm/rmap.c tlb_ubc->writable = true;
writable 674 mm/rmap.c static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
writable 2643 net/unix/af_unix.c unsigned int writable;
writable 2676 net/unix/af_unix.c writable = unix_writable(sk);
writable 2677 net/unix/af_unix.c if (writable) {
writable 2684 net/unix/af_unix.c writable = 0;
writable 2689 net/unix/af_unix.c if (writable)
writable 732 net/vmw_vsock/hyperv_transport.c int hvs_notify_poll_out(struct vsock_sock *vsk, size_t target, bool *writable)
writable 734 net/vmw_vsock/hyperv_transport.c *writable = hvs_stream_has_space(vsk) > 0;
writable 21 tools/testing/selftests/kvm/lib/x86_64/processor.c uint64_t writable:1;
writable 36 tools/testing/selftests/kvm/lib/x86_64/processor.c uint64_t writable:1;
writable 51 tools/testing/selftests/kvm/lib/x86_64/processor.c uint64_t writable:1;
writable 66 tools/testing/selftests/kvm/lib/x86_64/processor.c uint64_t writable:1;
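
The KVM selftest entries above declare the same writable:1 bit in each x86 paging structure (PML4E, PDPTE, PDE, PTE). Below is a simplified standalone sketch of one such 64-bit entry; only a few of the architectural bits are shown and the field split is illustrative, not the selftest's full layout.

#include <assert.h>
#include <stdint.h>

/* Simplified x86-64 page-table entry: fields run from bit 0 upward. */
struct pte {
	uint64_t present:1;		/* bit 0 */
	uint64_t writable:1;		/* bit 1: page may be written */
	uint64_t user:1;		/* bit 2 */
	uint64_t ignored:9;		/* bits 3-11, simplified */
	uint64_t address:40;		/* bits 12-51: physical frame number */
	uint64_t reserved:11;		/* bits 52-62, simplified */
	uint64_t execute_disable:1;	/* bit 63 */
};

int main(void)
{
	struct pte e = { .present = 1, .writable = 1, .address = 0x1234 };

	assert(sizeof(e) == 8);		/* the bitfields pack into one u64 */
	return e.writable ? 0 : 1;
}
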
writable 295 tools/testing/selftests/kvm/lib/x86_64/processor.c pml4e[index[3]].writable = true;
writable 306 tools/testing/selftests/kvm/lib/x86_64/processor.c pdpe[index[2]].writable = true;
writable 317 tools/testing/selftests/kvm/lib/x86_64/processor.c pde[index[1]].writable = true;
writable 325 tools/testing/selftests/kvm/lib/x86_64/processor.c pte[index[0]].writable = true;
writable 369 tools/testing/selftests/kvm/lib/x86_64/processor.c pml4e->writable, pml4e->execute_disable);
writable 382 tools/testing/selftests/kvm/lib/x86_64/processor.c (uint64_t) pdpe->address, pdpe->writable,
writable 395 tools/testing/selftests/kvm/lib/x86_64/processor.c (uint64_t) pde->address, pde->writable,
writable 411 tools/testing/selftests/kvm/lib/x86_64/processor.c pte->writable,
writable 25 tools/testing/selftests/kvm/lib/x86_64/vmx.c uint64_t writable:1;
writable 429 tools/testing/selftests/kvm/lib/x86_64/vmx.c pml4e[index[3]].writable = true;
writable 441 tools/testing/selftests/kvm/lib/x86_64/vmx.c pdpe[index[2]].writable = true;
writable 453 tools/testing/selftests/kvm/lib/x86_64/vmx.c pde[index[1]].writable = true;
writable 462 tools/testing/selftests/kvm/lib/x86_64/vmx.c pte[index[0]].writable = true;
writable 1340 virt/kvm/arm/mmu.c phys_addr_t pa, unsigned long size, bool writable)
writable 1353 virt/kvm/arm/mmu.c if (writable)
writable 1674 virt/kvm/arm/mmu.c bool write_fault, writable, force_pte = false;
writable 1742 virt/kvm/arm/mmu.c pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
writable 1766 virt/kvm/arm/mmu.c writable = false;
writable 1790 virt/kvm/arm/mmu.c if (writable)
writable 1814 virt/kvm/arm/mmu.c if (writable)
writable 1826 virt/kvm/arm/mmu.c if (writable)
writable 1836 virt/kvm/arm/mmu.c if (writable) {
writable 1914 virt/kvm/arm/mmu.c bool is_iabt, write_fault, writable;
writable 1955 virt/kvm/arm/mmu.c hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
writable 1957 virt/kvm/arm/mmu.c if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
writable 2283 virt/kvm/arm/mmu.c bool writable = !(mem->flags & KVM_MEM_READONLY);
writable 2322 virt/kvm/arm/mmu.c if (writable && !(vma->vm_flags & VM_WRITE)) {
writable 2349 virt/kvm/arm/mmu.c writable);
writable 1481 virt/kvm/kvm_main.c gfn_t gfn, bool *writable)
writable 1485 virt/kvm/kvm_main.c if (!kvm_is_error_hva(hva) && writable)
writable 1486 virt/kvm/kvm_main.c *writable = !memslot_is_readonly(slot);
writable 1491 virt/kvm/kvm_main.c unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
writable 1495 virt/kvm/kvm_main.c return gfn_to_hva_memslot_prot(slot, gfn, writable);
writable 1498 virt/kvm/kvm_main.c unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
writable 1502 virt/kvm/kvm_main.c return gfn_to_hva_memslot_prot(slot, gfn, writable);
writable 1519 virt/kvm/kvm_main.c bool *writable, kvm_pfn_t *pfn)
writable 1529 virt/kvm/kvm_main.c if (!(write_fault || writable))
writable 1536 virt/kvm/kvm_main.c if (writable)
writable 1537 virt/kvm/kvm_main.c *writable = true;
writable 1549 virt/kvm/kvm_main.c bool *writable, kvm_pfn_t *pfn)
writable 1557 virt/kvm/kvm_main.c if (writable)
writable 1558 virt/kvm/kvm_main.c *writable = write_fault;
writable 1570 virt/kvm/kvm_main.c if (unlikely(!write_fault) && writable) {
writable 1574 virt/kvm/kvm_main.c *writable = true;
writable 1596 virt/kvm/kvm_main.c bool write_fault, bool *writable,
writable 1623 virt/kvm/kvm_main.c if (writable)
writable 1624 virt/kvm/kvm_main.c *writable = true;
writable 1658 virt/kvm/kvm_main.c bool write_fault, bool *writable)
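
The virt/kvm/kvm_main.c entries above outline the hva_to_pfn() flow: a fast attempt that pins with write access whenever the fault is a write or the caller can accept a writable mapping, and a slow path that first reports *writable = write_fault and upgrades it to true if a later write-mode probe succeeds. Below is a standalone approximation of that dispatch; probe_fast and probe_slow are hypothetical stand-ins for the GUP calls, not kernel functions.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical probes; both pretend the page exists and is writable. */
static bool probe_fast(unsigned long addr, bool write) { (void)addr; return write; }
static bool probe_slow(unsigned long addr, bool write) { (void)addr; (void)write; return true; }

static unsigned long hva_to_pfn_sketch(unsigned long addr, bool write_fault,
				       bool *writable)
{
	/* Fast pin with write access only if this is a write fault or the
	 * caller accepts a writable mapping (writable != NULL), mirroring
	 * the "if (!(write_fault || writable))" bail-out above. */
	if ((write_fault || writable) && probe_fast(addr, true)) {
		if (writable)
			*writable = true;
		return addr >> 12;
	}

	/* Slow path: report what was asked for, then upgrade if a second,
	 * write-mode probe shows the page is writable after all. */
	if (writable)
		*writable = write_fault;
	if (!probe_slow(addr, write_fault))
		return 0;
	if (!write_fault && writable && probe_fast(addr, true))
		*writable = true;
	return addr >> 12;
}

int main(void)
{
	bool writable;
	unsigned long pfn = hva_to_pfn_sketch(0x7f0000001000UL, false, &writable);

	printf("pfn=%#lx writable=%d\n", pfn, writable);
	return 0;
}
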
writable 1667 virt/kvm/kvm_main.c if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
writable 1673 virt/kvm/kvm_main.c npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
writable 1690 virt/kvm/kvm_main.c r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
writable 1707 virt/kvm/kvm_main.c bool *writable)
writable 1712 virt/kvm/kvm_main.c if (writable)
writable 1713 virt/kvm/kvm_main.c *writable = false;
writable 1718 virt/kvm/kvm_main.c if (writable)
writable 1719 virt/kvm/kvm_main.c *writable = false;
writable 1724 virt/kvm/kvm_main.c if (writable && memslot_is_readonly(slot)) {
writable 1725 virt/kvm/kvm_main.c *writable = false;
writable 1726 virt/kvm/kvm_main.c writable = NULL;
writable 1730 virt/kvm/kvm_main.c writable);
writable 1735 virt/kvm/kvm_main.c bool *writable)
writable 1738 virt/kvm/kvm_main.c write_fault, writable);
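
The closing __gfn_to_pfn_memslot() entries add the read-only-memslot guard: *writable is forced to false and the pointer is dropped before the address reaches hva_to_pfn(), so the lower layers never report a writable mapping for a read-only slot. Below is a standalone sketch of that guard; the memslot check and pfn lookup are stubbed-out stand-ins, not the kernel's helpers.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for memslot_is_readonly() and hva_to_pfn(). */
static bool memslot_is_readonly_stub(int slot) { return slot == 1; }
static unsigned long hva_to_pfn_stub(unsigned long hva, bool write_fault,
				     bool *writable)
{
	(void)write_fault;
	if (writable)
		*writable = true;
	return hva >> 12;
}

/* Read-only slot: clear the caller's flag and drop the pointer so the
 * lower layer cannot report (or create) a writable mapping. */
static unsigned long gfn_to_pfn_sketch(int slot, unsigned long hva,
				       bool write_fault, bool *writable)
{
	if (writable && memslot_is_readonly_stub(slot)) {
		*writable = false;
		writable = NULL;
	}
	return hva_to_pfn_stub(hva, write_fault, writable);
}

int main(void)
{
	bool writable = true;

	gfn_to_pfn_sketch(1, 0x2000, false, &writable);
	printf("read-only slot -> writable=%d\n", writable);	/* prints 0 */
	return 0;
}
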