Cross-reference of "from" under arch/ (file:lines, one note per group of hits):

arch/alpha/include/asm/page.h:25 - copy_user_page() defined as copy_page()
arch/alpha/include/asm/uaccess.h:303-314 - extern __copy_user(); raw_copy_from_user()/raw_copy_to_user() forward to it with __force casts
arch/alpha/kernel/io.c:421-463 - memcpy_fromio(): when to and from share alignment, copy in 8-, 4- and 2-byte __raw_read chunks, then a byte tail (a plain-C sketch of this chunking appears after the listing)
arch/alpha/kernel/io.c:475-518 - memcpy_toio(): the same chunking strategy using __raw_write*()
arch/alpha/kernel/setup.c:272-283 - simple_strtoul() parse of a size argument with K/k, M/m, G/g suffix handling
arch/arc/include/asm/entry-arcv2.h:144-145 - comments: SP comes from AUX_USER_SP in U mode, from an offset off the current SP in K mode
arch/arc/include/asm/page.h:13,20 - copy_page() as memcpy(PAGE_SIZE); copy_user_highpage() prototype
arch/arc/include/asm/tlb-mmu1.h:21,24 - assembly comments (avoiding GetIndex from MMU; TLBPD0 from TLB_RELOAD)
arch/arc/include/asm/uaccess.h:168-205 - raw_copy_from_user(): alignment test on to/from and the first inline-asm operand list
arch/arc/include/asm/uaccess.h:246-385 - inline-asm operand lists ("+r"(to), "+r"(from), ...) for the unrolled copy-from-user paths
arch/arc/include/asm/uaccess.h:395-607 - raw_copy_to_user(): the same alignment test and operand lists for the copy-to-user paths
arch/arc/mm/cache.c:1071-1114 - copy_user_highpage(): kmap_atomic() of the source, cache-congruence check when the page is mapped, PG_dc_clean set or cleared on the source page
arch/arm/include/asm/io.h:326-340 - memcpy_fromio()/memcpy_toio() as wrappers around mmiocpy(), plus the matching #defines
arch/arm/include/asm/kvm_mmu.h:46 - create_hyp_mappings(from, to, prot)
arch/arm/include/asm/memory.h:186,193 - __pv_stub(from, to, instr, type) macro and its "r"(from) operand
arch/arm/include/asm/page-nommu.h:12,15 - copy_page()/copy_user_page() as memcpy
arch/arm/include/asm/page.h:112-140 - cpu_copy_user_highpage hook, __cpu_copy_user_highpage(), the copy_user_highpage() define, copy_page() prototype
arch/arm/include/asm/traps.h:32 - dump_backtrace_entry(where, from, frame) prototype
arch/arm/include/asm/uaccess.h:513-541 - arm_copy_from_user()/arm_copy_to_user(), __copy_to_user_std(), and the raw_copy_{from,to}_user() wrappers that call them
arch/arm/include/asm/uaccess.h:561-569 - variants that fall back to plain memcpy()
arch/arm/kernel/atags_parse.c:183-229 - command line taken from default_command_line, passed to mdesc->fixup(), then strlcpy()ed into boot_command_line
arch/arm/kernel/io.c:45-63 - _memcpy_fromio()/_memcpy_toio(): byte-at-a-time loops (readb() on the read side)
arch/arm/kernel/traps.c:65-73 - dump_backtrace_entry(): printk of where/from and an in_entry_text(from) check
arch/arm/lib/uaccess_with_memcpy.c:85-153 - __copy_to_user_memcpy() copying in chunks and arm_copy_to_user() with a __copy_to_user_std() fallback
arch/arm/mach-ebsa110/io.c:349-436 - outsb/insb, outsw/insw, outsl/insl built on __raw_writes*/__raw_reads* against ISAIO_BASE
arch/arm/mach-mxs/mach-mxs.c:160-172 - of_find_compatible_node() cursor loop over "fsl,imx28-fec" nodes
arch/arm/mach-omap2/pm44xx.c:43,182-218 - clockdomain wakeup-dependency table of {.from, .to} pairs and the clkdm_lookup()/clkdm_add_wkdep() loop over it
arch/arm/mach-orion5x/common.c:380 - tag_fixup_mem32(struct tag *t, char **from)
arch/arm/mm/copypage-{fa,feroceon,v4mc,v4wb,v4wt,v6,xsc3,xscale}.c - per-CPU copy_user_highpage() implementations: kmap_atomic() of the source, flush_cache_page(), PG_dcache_clean handling, minicache mappings on v4mc/xscale
arch/arm/net/bpf_jit_32.c:401-462 - branch offsets computed from ctx->offsets[]/ctx->idx (to - from - 1, to - from - 2)
arch/arm/plat-samsung/devs.c:718,722 - kmemdup() of the s3c2410_nand_set array
arch/arm64/include/asm/assembler.h:138 - _asm_extable macro taking from, to
arch/arm64/include/asm/fpsimdmacros.h:167,176 - __for/_for assembler macros with from:req, to:req
arch/arm64/include/asm/kvm_mmu.h:147 - create_hyp_mappings(from, to, prot)
arch/arm64/include/asm/numa.h:11,34 - __node_distance() and numa_set_distance() prototypes
arch/arm64/include/asm/page.h:19-25 - __cpu_copy_user_page(), copy_page(), and the copy_user_page() define
arch/arm64/include/asm/uaccess.h:98,101 - _ASM_EXTABLE(from, to) macro
arch/arm64/include/asm/uaccess.h:383-411 - __arch_copy_{from,to,in}_user() and the raw_copy_* macros, with the user pointer going through __uaccess_mask_ptr()
arch/arm64/include/asm/uaccess.h:438 - __copy_user_flushcache() prototype
arch/arm64/kernel/io.c:15-33 - __memcpy_fromio(): byte reads until the source is 8-byte aligned, __raw_readq() in 8-byte steps, byte tail
arch/arm64/kernel/io.c:43-61 - __memcpy_toio(): the same shape with __raw_writeb()/__raw_writeq()
arch/arm64/lib/uaccess_flushcache.c:28-34 - __copy_user_flushcache() calling __arch_copy_from_user()
arch/arm64/mm/numa.c:149-151 - pcpu_cpu_distance() via early_cpu_to_node()
arch/arm64/mm/numa.c:312-343 - numa_set_distance() with range and sanity checks, __node_distance() with LOCAL_/REMOTE_DISTANCE fallback, both indexing numa_distance[from * numa_distance_cnt + to] (see the distance-matrix sketch after the listing)
arch/arm64/net/bpf_jit_comp.c:149-167 - branch offsets from ctx->offset[]/ctx->idx
arch/c6x/include/asm/string.h:14 - asmlinkage memcpy() prototype
arch/c6x/include/asm/uaccess.h:17-84 - raw_copy_{from,to}_user(): a byte case, two inline-asm cases, and a memcpy() fallback
arch/csky/abiv1/inc/abi/page.h:22-25, abiv2/inc/abi/page.h:10-13 - copy_user_page() wrappers around copy_page()
arch/csky/include/asm/page.h:45,48 - memcpy() prototype and the copy_page() define
arch/csky/include/asm/uaccess.h:257-387 - ___copy_to_user()/___copy_from_user() asm macros and the raw_copy_{from,to}_user() prototypes
arch/csky/lib/usercopy.c:7-18 - raw_copy_{from,to}_user() invoking those macros
arch/h8300/kernel/h8300_ksyms.c:22 - strncpy_from_user() prototype
arch/hexagon/include/asm/page.h:116,122 - copy_page()/copy_user_page() as memcpy
arch/hexagon/include/asm/uaccess.h:51,53 - raw_copy_{from,to}_user() prototypes
arch/ia64/include/asm/numa.h:62-64 - slit_distance()/node_distance() macros and the __node_distance() prototype
arch/ia64/include/asm/page.h:66-80 - copy_page() prototype and the copy_user_page() define
arch/ia64/include/asm/uaccess.h:198-210 - __copy_user() and the raw_copy_{to,from}_user() wrappers
arch/ia64/include/asm/uaccess.h:233-237 - __strncpy_from_user() and the strncpy_from_user() macro
arch/ia64/kernel/kprobes.c:67-71 - set_brl_inst(): long-branch displacement ((s64)to - (s64)from) >> 4
arch/ia64/kernel/perfmon.c:6606,6628 - dump_pmu_state(const char *from)
arch/ia64/lib/io.c:11-28 - memcpy_fromio(): byte-at-a-time readb() loop; memcpy_toio() copying from a const char *src
arch/ia64/mm/numa.c:39-41 - __node_distance() returning slit_distance(from, to)
arch/m68k/fpsp040/fpsp.h:44-124 - comments on frame offsets from A6/A7 and the fsave frame layout
arch/m68k/ifpsp060/src/fpsp.S:1725,4143,14840,15293, pfpsp.S:1724,11271,11724 - comments ("exception occurred from supervisor mode", "could not have come from an ...")
arch/m68k/include/asm/openprom.h:35 - v0_seekdev(dev_desc, logical_offst, from)
arch/m68k/include/asm/page_mm.h:14-25 - inline-asm copy_page() loop over PAGE_SIZE / 32 - 1 iterations
arch/m68k/include/asm/page_mm.h:53-61 - memcpy-based copy_page() fallback and copy_user_page()
arch/m68k/include/asm/page_no.h:11,14 - memcpy-based copy_page()/copy_user_page()
arch/m68k/include/asm/uaccess_mm.h:182-183 - __generic_copy_{from,to}_user() prototypes
arch/m68k/include/asm/uaccess_mm.h:190-247 - the __constant_copy_from_user_asm() macro layers and the first size-specialized cases of __constant_copy_from_user()
arch/m68k/include/asm/uaccess_mm.h:250-275 - remaining __constant_copy_from_user() cases and the __generic_copy_from_user() fallback
arch/m68k/include/asm/uaccess_mm.h:281-356 - __constant_copy_to_user_asm(), __constant_copy_to_user() with __put_user_asm() fast paths for 1/2/4 bytes and asm cases for the odd sizes, __generic_copy_to_user() fallback
arch/m68k/include/asm/uaccess_mm.h:363-375 - raw_copy_{from,to}_user() picking the constant-size or generic path (a plain-C sketch of this idiom appears after the listing)
arch/m68k/include/asm/uaccess_no.h:106-115 - memcpy-based raw copies
arch/m68k/lib/memcpy.c:10-84 - memcpy(): odd leading byte, then word and long loops, word/byte tail
arch/m68k/lib/uaccess.c:10-93 - __generic_copy_{from,to}_user() inline asm
arch/m68k/math-emu/fp_decode.h:133-411 - decoder comments (8-bit displacement, byte moves from/to the stack, absolute short/long addresses from user space)
arch/microblaze/include/asm/page.h:78 - copy_page() as memcpy
arch/microblaze/include/asm/uaccess.h:95,314-330 - one __copy_tofrom_user() behind both raw_copy directions; __strncpy_user()
arch/mips/cavium-octeon/flash_setup.c:50-61 - map copy_from/copy_to helpers calling inline_map_copy_from/to()
arch/mips/include/asm/io.h:659 - __ioread64_copy() prototype
arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h:121,124 - comments (GP and SP fetched from global variables)
arch/mips/include/asm/mach-ip27/topology.h:28, mach-loongson64/topology.h:17 - node_distance() via the __node_distances[][] table
arch/mips/include/asm/msa.h:58-89 - write_msa_wr_{b,h,w,d}() and the width-dispatching write_msa_wr()
arch/mips/include/asm/page.h:91,114 - copy_page() and copy_user_highpage() prototypes
arch/mips/include/asm/pgtable.h:495 - remap_pfn_range() prototype
arch/mips/include/asm/uaccess.h:519-631 - __invoke_copy_from()/__invoke_copy_to() macros, their kernel/user/EVA instantiations, and raw_copy_{to,from,in}_user()
arch/mips/lib/iomap_copy.c:16-26 - __ioread64_copy(): 64-bit reads, or __ioread32_copy() with a doubled count
arch/mips/mm/init.c:171-183 - copy_user_highpage(): kmap_coherent() when the source is mapped and not dcache-dirty, else kmap_atomic()
arch/mips/mm/page.c:613-660 - copy_page(): falls back to copy_page_cpu() unless both pages sit in (C)KSEG0
arch/mips/txx9/rbtx4939/setup.c:327-364 - flash copy_from with 4 MB bank swizzling of the source address
arch/nds32/include/asm/page.h:26-41 - copy_user_highpage()/copy_user_page()/copy_page()
arch/nds32/include/asm/uaccess.h:266-268 - __arch_copy_{from,to}_user() prototypes
arch/nds32/mm/cacheflush.c:197-204 - copy_user_highpage() using page_address() and page_to_phys() of the source
arch/nios2/include/asm/page.h:49 - copy_page() as memcpy
arch/nios2/include/asm/uaccess.h:78-80 - raw_copy_{from,to}_user() prototypes
arch/openrisc/include/asm/page.h:40,43 - copy_page()/copy_user_page() as memcpy
arch/openrisc/include/asm/uaccess.h:237-246 - one __copy_tofrom_user() behind both raw_copy wrappers
arch/parisc/include/asm/alternative.h:45-54 - ALTERNATIVE()/ALTERNATIVE_CODE() macros storing (from - .) offsets and instruction counts
arch/parisc/include/asm/page.h:26,31 - copy_page() via copy_page_asm()
arch/parisc/kernel/alternative.c:31-91 - patch loop: resolve the from pointer from the entry offset, then memcpy() or store the replacement instruction
arch/powerpc/include/asm/book3s/32/kup.h:105,124, book3s/64/kup-radix.h:80,93, nohash/32/kup-8xx.h:36,42, kup.h:48-88 - allow_user_access()/prevent_user_access() and the allow/prevent_read_{from,write}_user() helpers (KUAP)
arch/powerpc/include/asm/kvm_book3s.h:181,186 - guest copy prototypes taking to/from and a length
arch/powerpc/include/asm/kvm_fpu.h:74-75 - kvm_cvt_fd()/kvm_cvt_df()
arch/powerpc/include/asm/page.h:322, page_32.h:59, page_64.h:80 - copy_user_page()/copy_page() prototypes
arch/powerpc/include/asm/processor.h:429-430 - cvt_fd()/cvt_df() prototypes
arch/powerpc/include/asm/string.h:34-35 - __memcpy()/__memmove()
arch/powerpc/include/asm/uaccess.h:307-318 - __copy_tofrom_user() and raw_copy_in_user() bracketed by allow/prevent_read_write_user()
arch/powerpc/include/asm/uaccess.h:324-355 - raw_copy_from_user(): __get_user_size() fast paths for 1/2/4/8 bytes, then allow_read_from_user() around __copy_tofrom_user()
arch/powerpc/include/asm/uaccess.h:360-396 - raw_copy_to_user() with __put_user_size() fast paths, and copy_to_user_mcsafe() via memcpy_mcsafe()
arch/powerpc/kernel/setup_64.c:773-775 - pcpu_cpu_distance() comparing early_cpu_to_node()
arch/powerpc/kernel/signal.h:29-42, signal_32.c:253-372 - helpers that __copy_from_user() FP/VSX register state from the signal frame
arch/powerpc/kvm/book3s_64_mmu_radix.c:31-123 - __kvmhv_copy_tofrom_guest_radix(): quadrant-encoded effective address, raw_copy_{from,to}_user(), and the copy-to-guest wrapper
arch/powerpc/kvm/book3s_hv.c:801-819 - kvmppc_copy_guest(): gfn_to_memslot()/gfn_to_hva_memslot() arithmetic on the source gpa
arch/powerpc/perf/core-book3s.c:514,518 - bhrb_entries[].from assignments
arch/powerpc/platforms/ps3/platform.h:139, platforms/ps3/repository.c:458-465 - ps3_repository_find_bus() scanning bus indices starting at from
arch/riscv/include/asm/page.h:51 - copy_page() as memcpy
arch/riscv/include/asm/uaccess.h:371-384 - __asm_copy_{to,from}_user() and the raw_copy wrappers
arch/riscv/net/bpf_jit_comp.c:555-564 - rv_offset()/epilogue offset computed as (to - from) << 2 from ctx->offset[]/ctx->ninsns (see the JIT-offset sketch after the listing)
arch/s390/hypfs/inode.c:164-198 - hypfs_write_iter(): iov_iter_count()/iov_iter_advance()
arch/s390/include/asm/gmap.h:109,116 - gmap_map_segment()/gmap_discard() prototypes
arch/s390/include/asm/idals.h:216-229 - idal_buffer_from_user(): copy_from_user() in IDA_BLOCK_SIZE chunks
arch/s390/include/asm/page.h:56-69 - inline-asm copy_page() with the source pinned to register 4; copy_user_page() define
arch/s390/include/asm/uaccess.h:53-80,244 - raw_copy prototypes, the __put_get_user_asm() macro, raw_copy_in_user()
arch/s390/include/asm/vx-insn.h:367,377 - VX_NUM v1, \vfrom in vector-insn macros
arch/s390/kernel/crash_dump.c:137-203 - copy-from-oldmem kernel and user paths: HSA window vs. OLDMEM_BASE offset adjustment, then memcpy_real()/copy_to_user_real()
arch/s390/kernel/crash_dump.c:240-294 - remap_oldmem_pfn_range_kdump()/_zfcpdump() and the remap_oldmem_pfn_range() dispatcher
arch/s390/kernel/irq.c:191-193 - arch_dynirq_lower_bound() clamping from to NR_IRQS_BASE
arch/s390/kvm/trace-s390.h:288-307 - tracepoint with __u16 from/to fields decoded against the KVM_S390_AIS_MODE_* constants
arch/s390/lib/uaccess.c:171-315 - raw_copy_from_user() (mvcos or mvcp), raw_copy_to_user() (mvcos or mvcs), copy_in_user_mvcos()/_mvc(), raw_copy_in_user()
arch/s390/mm/cmm.c:355-359 - cmm_smsg_target() comparing the sender against from
arch/s390/mm/gmap.c:427-448,694-700 - gmap_map_segment(): PMD_SIZE alignment and overflow checks; gmap_discard() loop from from to to
arch/s390/pci/pci.c:224-226 - __iowrite64_copy() via zpci_memcpy_toio()
arch/sh/drivers/dma/dma-api.c:282-288 - dma_xfer() storing from into channel->sar
arch/sh/include/asm/dma.h:109-120 - dma_xfer() prototype and the dma_write/dma_read(_page) macros
arch/sh/include/asm/page.h:62-68 - copy_page(), copy_user_page() via __copy_user(), copy_user_highpage() prototype
arch/sh/include/asm/uaccess.h:111-122,152 - __copy_user(), the raw_copy wrappers, and the mem-access ->from() method pointer
arch/sh/kernel/io.c:16-58 - memcpy_fromio(): inline-asm fast path when the destination is cache-line aligned and the source word aligned, then 32-bit and byte loops
arch/sh/kernel/io.c:79-92 - memcpy_toio(): 32-bit loop when both pointers are word aligned, byte loop otherwise
arch/sh/kernel/machvec.c:39-57 - early_parse_mv(): extract the machvec name from the command line
arch/sh/kernel/traps_32.c:121-246 - unaligned-access fixup calling ma->from() for 2- and 4-byte loads
arch/sh/mm/cache.c:95-108 - copy_user_highpage(): kmap_coherent() when the dcache has aliases and the source page is mapped and PG_dcache_clean, else kmap_atomic()
arch/sh/mm/nommu.c:21-28 - copy_page()/__copy_user() via memcpy()
arch/sparc/include/asm/mdesc.h:55 - mdesc_next_arc() prototype
arch/sparc/include/asm/openprom.h:27 - v0_seekdev()
arch/sparc/include/asm/page_32.h:21-27, page_64.h:50-56 - copy_page()/copy_user_page()/copy_user_highpage()/copy_highpage()
arch/sparc/include/asm/pgtable_32.h:414-423, pgtable_64.h:1068-1077 - io remap helpers ending in remap_pfn_range()
arch/sparc/include/asm/uaccess_32.h:235-244 - __copy_user() and the raw_copy wrappers; uaccess_64.h:179-189 - prototypes
arch/sparc/kernel/mdesc.c:703-712 - mdesc_next_arc() bounds check against MDESC_NODE_NULL/last_node
arch/sparc/kernel/smp_64.c:1613-1615 - pcpu_cpu_distance() comparing cpu_to_node()
arch/sparc/mm/init_64.c:1374-1379 - __node_distance() with a MAX_NUMNODES check (the listing is cut off mid-expression here)
LOCAL_DISTANCE : REMOTE_DISTANCE; from 1381 arch/sparc/mm/init_64.c return numa_latency[from][to]; from 3128 arch/sparc/mm/init_64.c void copy_user_highpage(struct page *to, struct page *from, from 3133 arch/sparc/mm/init_64.c vfrom = kmap_atomic(from); from 3145 arch/sparc/mm/init_64.c pfrom = page_to_phys(from); from 3163 arch/sparc/mm/init_64.c void copy_highpage(struct page *to, struct page *from) from 3167 arch/sparc/mm/init_64.c vfrom = kmap_atomic(from); from 3179 arch/sparc/mm/init_64.c pfrom = page_to_phys(from); from 261 arch/sparc/net/bpf_jit_comp_64.c static void emit_reg_move(u32 from, u32 to, struct jit_ctx *ctx) from 263 arch/sparc/net/bpf_jit_comp_64.c emit(OR | RS1(G0) | RS2(from) | RD(to), ctx); from 145 arch/um/drivers/cow_user.c static int absolutize(char *to, int size, char *from) from 155 arch/um/drivers/cow_user.c slash = strrchr(from, '/'); from 158 arch/um/drivers/cow_user.c if (chdir(from)) { from 161 arch/um/drivers/cow_user.c "errno = %d\n", from, errno); from 167 arch/um/drivers/cow_user.c "errno = %d\n", from, errno); from 173 arch/um/drivers/cow_user.c "chars\n", from, size); from 179 arch/um/drivers/cow_user.c if (strlen(save_cwd) + 1 + strlen(from) + 1 > size) { from 181 arch/um/drivers/cow_user.c "chars\n", from, size); from 186 arch/um/drivers/cow_user.c strcat(to, from); from 30 arch/um/include/asm/page.h #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) from 33 arch/um/include/asm/page.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) from 45 arch/um/include/asm/page.h #define pte_copy(to, from) ({ (to).pte = (from).pte; }) from 72 arch/um/include/asm/page.h #define pte_copy(to, from) ((to).pte = (from).pte) from 24 arch/um/include/asm/uaccess.h extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n); from 25 arch/um/include/asm/uaccess.h extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n); from 54 arch/um/include/shared/kern_util.h extern int copy_from_user_proc(void *to, void *from, int size); from 86 arch/um/kernel/process.c void *__switch_to(struct task_struct *from, struct task_struct *to) from 88 arch/um/kernel/process.c to->thread.prev_sched = from; from 91 arch/um/kernel/process.c switch_threads(&from->thread.switch_buf, &to->thread.switch_buf); from 291 arch/um/kernel/process.c int copy_to_user_proc(void __user *to, void *from, int size) from 293 arch/um/kernel/process.c return copy_to_user(to, from, size); from 296 arch/um/kernel/process.c int copy_from_user_proc(void *to, void __user *from, int size) from 298 arch/um/kernel/process.c return copy_from_user(to, from, size); from 133 arch/um/kernel/skas/uaccess.c static int copy_chunk_from_user(unsigned long from, int len, void *arg) from 137 arch/um/kernel/skas/uaccess.c memcpy((void *) to, (void *) from, len); from 142 arch/um/kernel/skas/uaccess.c unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) from 145 arch/um/kernel/skas/uaccess.c memcpy(to, (__force void*)from, n); from 149 arch/um/kernel/skas/uaccess.c return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to); from 155 arch/um/kernel/skas/uaccess.c unsigned long *from_ptr = arg, from = *from_ptr; from 157 arch/um/kernel/skas/uaccess.c memcpy((void *) to, (void *) from, len); from 162 arch/um/kernel/skas/uaccess.c unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) from 165 arch/um/kernel/skas/uaccess.c memcpy((__force void *) to, from, n); from 
169 arch/um/kernel/skas/uaccess.c return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from); from 173 arch/um/kernel/skas/uaccess.c static int strncpy_chunk_from_user(unsigned long from, int len, void *arg) from 178 arch/um/kernel/skas/uaccess.c strncpy(to, (void *) from, len); from 23 arch/unicore32/include/asm/page.h extern void copy_page(void *to, const void *from); from 26 arch/unicore32/include/asm/page.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) from 14 arch/unicore32/include/asm/traps.h unsigned long from, unsigned long frame); from 24 arch/unicore32/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n); from 26 arch/unicore32/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n); from 30 arch/unicore32/include/asm/uaccess.h __strncpy_from_user(char *to, const char __user *from, unsigned long count); from 240 arch/unicore32/kernel/setup.c char *from = default_command_line; from 250 arch/unicore32/kernel/setup.c strlcpy(boot_command_line, from, COMMAND_LINE_SIZE); from 36 arch/unicore32/kernel/traps.c unsigned long from, unsigned long frame) from 40 arch/unicore32/kernel/traps.c where, (void *)where, from, (void *)from); from 43 arch/unicore32/kernel/traps.c where, from); from 585 arch/x86/events/intel/ds.c u64 from; from 632 arch/x86/events/intel/ds.c (kernel_ip(at->from) || kernel_ip(at->to))) from 651 arch/x86/events/intel/ds.c (kernel_ip(at->from) || kernel_ip(at->to))) from 654 arch/x86/events/intel/ds.c data.ip = at->from; from 1202 arch/x86/events/intel/ds.c unsigned long from = cpuc->lbr_entries[0].from; from 1218 arch/x86/events/intel/ds.c if (!cpuc->lbr_stack.nr || !from || !to) from 1238 arch/x86/events/intel/ds.c set_linear_ip(regs, from); from 392 arch/x86/events/intel/lbr.c u64 tos, from; from 404 arch/x86/events/intel/lbr.c from = rdlbr_from(lbr_idx); from 405 arch/x86/events/intel/lbr.c if (!from) from 407 arch/x86/events/intel/lbr.c task_ctx->lbr_from[i] = from; from 546 arch/x86/events/intel/lbr.c u32 from; from 554 arch/x86/events/intel/lbr.c cpuc->lbr_entries[i].from = msr_lastbranch.from; from 590 arch/x86/events/intel/lbr.c u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0; from 595 arch/x86/events/intel/lbr.c from = rdlbr_from(lbr_idx); from 602 arch/x86/events/intel/lbr.c if (call_stack && !from) from 617 arch/x86/events/intel/lbr.c mis = !!(from & LBR_FROM_FLAG_MISPRED); from 626 arch/x86/events/intel/lbr.c mis = !!(from & LBR_FROM_FLAG_MISPRED); from 631 arch/x86/events/intel/lbr.c in_tx = !!(from & LBR_FROM_FLAG_IN_TX); from 632 arch/x86/events/intel/lbr.c abort = !!(from & LBR_FROM_FLAG_ABORT); from 635 arch/x86/events/intel/lbr.c from = (u64)((((s64)from) << skip) >> skip); from 648 arch/x86/events/intel/lbr.c cpuc->lbr_entries[out].from = from; from 832 arch/x86/events/intel/lbr.c static int branch_type(unsigned long from, unsigned long to, int abort) from 843 arch/x86/events/intel/lbr.c from_plm = kernel_ip(from) ? 
X86_BR_KERNEL : X86_BR_USER; from 849 arch/x86/events/intel/lbr.c if (from == 0 || to == 0) from 864 arch/x86/events/intel/lbr.c bytes_left = copy_from_user_nmi(buf, (void __user *)from, from 878 arch/x86/events/intel/lbr.c if (kernel_text_address(from)) { from 879 arch/x86/events/intel/lbr.c addr = (void *)from; from 1044 arch/x86/events/intel/lbr.c u64 from, to; from 1056 arch/x86/events/intel/lbr.c from = cpuc->lbr_entries[i].from; from 1059 arch/x86/events/intel/lbr.c type = branch_type(from, to, cpuc->lbr_entries[i].abort); from 1069 arch/x86/events/intel/lbr.c cpuc->lbr_entries[i].from = 0; from 1082 arch/x86/events/intel/lbr.c if (!cpuc->lbr_entries[i].from) { from 1087 arch/x86/events/intel/lbr.c if (!cpuc->lbr_entries[i].from) from 1104 arch/x86/events/intel/lbr.c e->from = lbr->lbr[i].from; from 122 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_HANDLE(from, to, handler) \ from 125 arch/x86/include/asm/asm.h .long (from) - . ; \ from 130 arch/x86/include/asm/asm.h # define _ASM_EXTABLE(from, to) \ from 131 arch/x86/include/asm/asm.h _ASM_EXTABLE_HANDLE(from, to, ex_handler_default) from 133 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_UA(from, to) \ from 134 arch/x86/include/asm/asm.h _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) from 136 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_FAULT(from, to) \ from 137 arch/x86/include/asm/asm.h _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) from 139 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_EX(from, to) \ from 140 arch/x86/include/asm/asm.h _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext) from 142 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_REFCOUNT(from, to) \ from 143 arch/x86/include/asm/asm.h _ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount) from 153 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_HANDLE(from, to, handler) \ from 156 arch/x86/include/asm/asm.h " .long (" #from ") - .\n" \ from 161 arch/x86/include/asm/asm.h # define _ASM_EXTABLE(from, to) \ from 162 arch/x86/include/asm/asm.h _ASM_EXTABLE_HANDLE(from, to, ex_handler_default) from 164 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_UA(from, to) \ from 165 arch/x86/include/asm/asm.h _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess) from 167 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_FAULT(from, to) \ from 168 arch/x86/include/asm/asm.h _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault) from 170 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_EX(from, to) \ from 171 arch/x86/include/asm/asm.h _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext) from 173 arch/x86/include/asm/asm.h # define _ASM_EXTABLE_REFCOUNT(from, to) \ from 174 arch/x86/include/asm/asm.h _ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount) from 235 arch/x86/include/asm/compat.h const kernel_siginfo_t *from, bool x32_ABI); from 11 arch/x86/include/asm/mmx.h extern void *_mmx_memcpy(void *to, const void *from, size_t size); from 13 arch/x86/include/asm/mmx.h extern void mmx_copy_page(void *to, void *from); from 35 arch/x86/include/asm/numa.h extern void __init numa_set_distance(int from, int to, int distance); from 31 arch/x86/include/asm/page.h static inline void copy_user_page(void *to, void *from, unsigned long vaddr, from 34 arch/x86/include/asm/page.h copy_page(to, from); from 30 arch/x86/include/asm/page_32.h static inline void copy_page(void *to, void *from) from 32 arch/x86/include/asm/page_32.h mmx_copy_page(to, from); from 42 arch/x86/include/asm/page_32.h static inline void copy_page(void *to, void *from) from 44 arch/x86/include/asm/page_32.h memcpy(to, from, 
PAGE_SIZE); from 57 arch/x86/include/asm/page_64.h void copy_page(void *to, void *from); from 215 arch/x86/include/asm/perf_event.h u64 from, to, info; from 33 arch/x86/include/asm/string_32.h static __always_inline void *__memcpy(void *to, const void *from, size_t n) from 43 arch/x86/include/asm/string_32.h : "0" (n / 4), "g" (n), "1" ((long)to), "2" ((long)from) from 52 arch/x86/include/asm/string_32.h static __always_inline void *__constant_memcpy(void *to, const void *from, from 61 arch/x86/include/asm/string_32.h *(char *)to = *(char *)from; from 64 arch/x86/include/asm/string_32.h *(short *)to = *(short *)from; from 67 arch/x86/include/asm/string_32.h *(int *)to = *(int *)from; from 70 arch/x86/include/asm/string_32.h *(short *)to = *(short *)from; from 71 arch/x86/include/asm/string_32.h *((char *)to + 2) = *((char *)from + 2); from 74 arch/x86/include/asm/string_32.h *(int *)to = *(int *)from; from 75 arch/x86/include/asm/string_32.h *((char *)to + 4) = *((char *)from + 4); from 78 arch/x86/include/asm/string_32.h *(int *)to = *(int *)from; from 79 arch/x86/include/asm/string_32.h *((short *)to + 2) = *((short *)from + 2); from 82 arch/x86/include/asm/string_32.h *(int *)to = *(int *)from; from 83 arch/x86/include/asm/string_32.h *((int *)to + 1) = *((int *)from + 1); from 87 arch/x86/include/asm/string_32.h esi = (long)from; from 157 arch/x86/include/asm/string_32.h static inline void *__constant_memcpy3d(void *to, const void *from, size_t len) from 160 arch/x86/include/asm/string_32.h return __constant_memcpy(to, from, len); from 161 arch/x86/include/asm/string_32.h return _mmx_memcpy(to, from, len); from 164 arch/x86/include/asm/string_32.h static inline void *__memcpy3d(void *to, const void *from, size_t len) from 167 arch/x86/include/asm/string_32.h return __memcpy(to, from, len); from 168 arch/x86/include/asm/string_32.h return _mmx_memcpy(to, from, len); from 14 arch/x86/include/asm/string_64.h extern void *memcpy(void *to, const void *from, size_t len); from 15 arch/x86/include/asm/string_64.h extern void *__memcpy(void *to, const void *from, size_t len); from 18 arch/x86/include/asm/syscalls.h long ksys_ioperm(unsigned long from, unsigned long num, int turn_on); from 578 arch/x86/include/asm/uaccess.h copy_from_user_nmi(void *to, const void __user *from, unsigned long n); from 13 arch/x86/include/asm/uaccess_32.h (void *to, const void *from, unsigned long n); from 15 arch/x86/include/asm/uaccess_32.h (void *to, const void __user *from, unsigned long n); from 18 arch/x86/include/asm/uaccess_32.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) from 20 arch/x86/include/asm/uaccess_32.h return __copy_user_ll((__force void *)to, from, n); from 24 arch/x86/include/asm/uaccess_32.h raw_copy_from_user(void *to, const void __user *from, unsigned long n) from 33 arch/x86/include/asm/uaccess_32.h __get_user_asm_nozero(*(u8 *)to, from, ret, from 40 arch/x86/include/asm/uaccess_32.h __get_user_asm_nozero(*(u16 *)to, from, ret, from 47 arch/x86/include/asm/uaccess_32.h __get_user_asm_nozero(*(u32 *)to, from, ret, from 53 arch/x86/include/asm/uaccess_32.h return __copy_user_ll(to, (__force const void *)from, n); from 57 arch/x86/include/asm/uaccess_32.h __copy_from_user_inatomic_nocache(void *to, const void __user *from, from 60 arch/x86/include/asm/uaccess_32.h return __copy_from_user_ll_nocache_nozero(to, from, n); from 21 arch/x86/include/asm/uaccess_64.h copy_user_enhanced_fast_string(void *to, const void *from, unsigned len); from 23 
arch/x86/include/asm/uaccess_64.h copy_user_generic_string(void *to, const void *from, unsigned len); from 25 arch/x86/include/asm/uaccess_64.h copy_user_generic_unrolled(void *to, const void *from, unsigned len); from 28 arch/x86/include/asm/uaccess_64.h copy_user_generic(void *to, const void *from, unsigned len) from 42 arch/x86/include/asm/uaccess_64.h ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from), from 44 arch/x86/include/asm/uaccess_64.h "1" (to), "2" (from), "3" (len) from 50 arch/x86/include/asm/uaccess_64.h copy_to_user_mcsafe(void *to, const void *from, unsigned len) from 60 arch/x86/include/asm/uaccess_64.h ret = __memcpy_mcsafe(to, from, len); from 211 arch/x86/include/asm/uaccess_64.h mcsafe_handle_tail(char *to, char *from, unsigned len); from 2438 arch/x86/kernel/apic/io_apic.c unsigned int arch_dynirq_lower_bound(unsigned int from) from 2450 arch/x86/kernel/apic/io_apic.c return ioapic_dynirq_base ? : from; from 87 arch/x86/kernel/cpu/hypervisor.c const void * const *from = (const void * const *)src; from 91 arch/x86/kernel/cpu/hypervisor.c if (from[i]) from 92 arch/x86/kernel/cpu/hypervisor.c to[i] = from[i]; from 2182 arch/x86/kernel/cpu/resctrl/rdtgroup.c static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, from 2189 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (!from || is_closid_match(t, from) || from 2190 arch/x86/kernel/cpu/resctrl/rdtgroup.c is_rmid_match(t, from)) { from 944 arch/x86/kernel/e820.c enum e820_type from = 0, to = 0; from 948 arch/x86/kernel/e820.c from = simple_strtoull(p + 1, &p, 0); from 953 arch/x86/kernel/e820.c if (from && to) from 954 arch/x86/kernel/e820.c e820__range_update(start_at, mem_size, from, to); from 957 arch/x86/kernel/e820.c else if (from) from 958 arch/x86/kernel/e820.c e820__range_remove(start_at, mem_size, from, 1); from 236 arch/x86/kernel/fpu/regset.c struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0]; from 265 arch/x86/kernel/fpu/regset.c memcpy(&to[i], &from[i], sizeof(to[0])); from 272 arch/x86/kernel/fpu/regset.c struct _fpreg *from = (struct _fpreg *) &env->st_space[0]; from 292 arch/x86/kernel/fpu/regset.c memcpy(&to[i], &from[i], sizeof(from[0])); from 969 arch/x86/kernel/fpu/xstate.c static void copy_part(unsigned offset, unsigned size, void *from, from 976 arch/x86/kernel/fpu/xstate.c memcpy(*kbuf, from, size); from 27 arch/x86/kernel/ioport.c long ksys_ioperm(unsigned long from, unsigned long num, int turn_on) from 33 arch/x86/kernel/ioport.c if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) from 75 arch/x86/kernel/ioport.c bitmap_clear(t->io_bitmap_ptr, from, num); from 77 arch/x86/kernel/ioport.c bitmap_set(t->io_bitmap_ptr, from, num); from 101 arch/x86/kernel/ioport.c SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on) from 103 arch/x86/kernel/ioport.c return ksys_ioperm(from, num, turn_on); from 91 arch/x86/kernel/kprobes/common.h extern void synthesize_reljump(void *dest, void *from, void *to); from 92 arch/x86/kernel/kprobes/common.h extern void synthesize_relcall(void *dest, void *from, void *to); from 107 arch/x86/kernel/kprobes/core.c __synthesize_relative_insn(void *dest, void *from, void *to, u8 op) from 115 arch/x86/kernel/kprobes/core.c insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); from 120 arch/x86/kernel/kprobes/core.c void synthesize_reljump(void *dest, void *from, void *to) from 122 arch/x86/kernel/kprobes/core.c __synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE); from 127 arch/x86/kernel/kprobes/core.c void 
synthesize_relcall(void *dest, void *from, void *to) from 129 arch/x86/kernel/kprobes/core.c __synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE); from 141 arch/x86/kernel/setup_percpu.c static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) from 144 arch/x86/kernel/setup_percpu.c if (early_cpu_to_node(from) == early_cpu_to_node(to)) from 3321 arch/x86/kvm/svm.c struct vmcb_control_area *from = &from_vmcb->control; from 3323 arch/x86/kvm/svm.c dst->intercept_cr = from->intercept_cr; from 3324 arch/x86/kvm/svm.c dst->intercept_dr = from->intercept_dr; from 3325 arch/x86/kvm/svm.c dst->intercept_exceptions = from->intercept_exceptions; from 3326 arch/x86/kvm/svm.c dst->intercept = from->intercept; from 3327 arch/x86/kvm/svm.c dst->iopm_base_pa = from->iopm_base_pa; from 3328 arch/x86/kvm/svm.c dst->msrpm_base_pa = from->msrpm_base_pa; from 3329 arch/x86/kvm/svm.c dst->tsc_offset = from->tsc_offset; from 3331 arch/x86/kvm/svm.c dst->tlb_ctl = from->tlb_ctl; from 3332 arch/x86/kvm/svm.c dst->int_ctl = from->int_ctl; from 3333 arch/x86/kvm/svm.c dst->int_vector = from->int_vector; from 3334 arch/x86/kvm/svm.c dst->int_state = from->int_state; from 3335 arch/x86/kvm/svm.c dst->exit_code = from->exit_code; from 3336 arch/x86/kvm/svm.c dst->exit_code_hi = from->exit_code_hi; from 3337 arch/x86/kvm/svm.c dst->exit_info_1 = from->exit_info_1; from 3338 arch/x86/kvm/svm.c dst->exit_info_2 = from->exit_info_2; from 3339 arch/x86/kvm/svm.c dst->exit_int_info = from->exit_int_info; from 3340 arch/x86/kvm/svm.c dst->exit_int_info_err = from->exit_int_info_err; from 3341 arch/x86/kvm/svm.c dst->nested_ctl = from->nested_ctl; from 3342 arch/x86/kvm/svm.c dst->event_inj = from->event_inj; from 3343 arch/x86/kvm/svm.c dst->event_inj_err = from->event_inj_err; from 3344 arch/x86/kvm/svm.c dst->nested_cr3 = from->nested_cr3; from 3345 arch/x86/kvm/svm.c dst->virt_ext = from->virt_ext; from 3346 arch/x86/kvm/svm.c dst->pause_filter_count = from->pause_filter_count; from 3347 arch/x86/kvm/svm.c dst->pause_filter_thresh = from->pause_filter_thresh; from 1629 arch/x86/kvm/vmx/vmx.c static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) from 1634 arch/x86/kvm/vmx/vmx.c vmx->guest_msrs[to] = vmx->guest_msrs[from]; from 1635 arch/x86/kvm/vmx/vmx.c vmx->guest_msrs[from] = tmp; from 5 arch/x86/lib/iomem.c #define movs(type,to,from) \ from 6 arch/x86/lib/iomem.c asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory") from 9 arch/x86/lib/iomem.c static __always_inline void rep_movs(void *to, const void *from, size_t n) from 21 arch/x86/lib/iomem.c : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from) from 25 arch/x86/lib/iomem.c void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) from 31 arch/x86/lib/iomem.c if (unlikely(1 & (unsigned long)from)) { from 32 arch/x86/lib/iomem.c movs("b", to, from); from 35 arch/x86/lib/iomem.c if (n > 1 && unlikely(2 & (unsigned long)from)) { from 36 arch/x86/lib/iomem.c movs("w", to, from); from 39 arch/x86/lib/iomem.c rep_movs(to, (const void *)from, n); from 43 arch/x86/lib/iomem.c void memcpy_toio(volatile void __iomem *to, const void *from, size_t n) from 50 arch/x86/lib/iomem.c movs("b", to, from); from 54 arch/x86/lib/iomem.c movs("w", to, from); from 57 arch/x86/lib/iomem.c rep_movs((void *)to, (const void *) from, n); from 8 arch/x86/lib/memcpy_32.c __visible void *memcpy(void *to, const void *from, size_t n) from 11 arch/x86/lib/memcpy_32.c return __memcpy3d(to, from, n); from 13 
arch/x86/lib/memcpy_32.c return __memcpy(to, from, n); from 29 arch/x86/lib/mmx_32.c void *_mmx_memcpy(void *to, const void *from, size_t len) from 35 arch/x86/lib/mmx_32.c return __memcpy(to, from, len); from 54 arch/x86/lib/mmx_32.c : : "r" (from)); from 80 arch/x86/lib/mmx_32.c : : "r" (from), "r" (to) : "memory"); from 82 arch/x86/lib/mmx_32.c from += 64; from 104 arch/x86/lib/mmx_32.c : : "r" (from), "r" (to) : "memory"); from 106 arch/x86/lib/mmx_32.c from += 64; from 112 arch/x86/lib/mmx_32.c __memcpy(to, from, len & 63); from 159 arch/x86/lib/mmx_32.c static void fast_copy_page(void *to, void *from) from 180 arch/x86/lib/mmx_32.c _ASM_EXTABLE(1b, 3b) : : "r" (from)); from 205 arch/x86/lib/mmx_32.c _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); from 207 arch/x86/lib/mmx_32.c from += 64; from 229 arch/x86/lib/mmx_32.c : : "r" (from), "r" (to) : "memory"); from 230 arch/x86/lib/mmx_32.c from += 64; from 281 arch/x86/lib/mmx_32.c static void fast_copy_page(void *to, void *from) from 298 arch/x86/lib/mmx_32.c _ASM_EXTABLE(1b, 3b) : : "r" (from)); from 324 arch/x86/lib/mmx_32.c : : "r" (from), "r" (to) : "memory"); from 326 arch/x86/lib/mmx_32.c from += 64; from 359 arch/x86/lib/mmx_32.c static void slow_copy_page(void *to, void *from) from 367 arch/x86/lib/mmx_32.c : "0" (1024), "1" ((long) to), "2" ((long) from) from 371 arch/x86/lib/mmx_32.c void mmx_copy_page(void *to, void *from) from 374 arch/x86/lib/mmx_32.c slow_copy_page(to, from); from 376 arch/x86/lib/mmx_32.c fast_copy_page(to, from); from 17 arch/x86/lib/usercopy.c copy_from_user_nmi(void *to, const void __user *from, unsigned long n) from 21 arch/x86/lib/usercopy.c if (__range_not_ok(from, n, TASK_SIZE)) from 33 arch/x86/lib/usercopy.c ret = __copy_from_user_inatomic(to, from, n); from 97 arch/x86/lib/usercopy_32.c __copy_user_intel(void __user *to, const void *from, unsigned long size) from 195 arch/x86/lib/usercopy_32.c : "1"(to), "2"(from), "0"(size) from 201 arch/x86/lib/usercopy_32.c const void __user *from, unsigned long size) from 283 arch/x86/lib/usercopy_32.c : "1"(to), "2"(from), "0"(size) from 294 arch/x86/lib/usercopy_32.c unsigned long __copy_user_intel(void __user *to, const void *from, from 299 arch/x86/lib/usercopy_32.c #define __copy_user(to, from, size) \ from 328 arch/x86/lib/usercopy_32.c : "3"(size), "0"(size), "1"(to), "2"(from) \ from 332 arch/x86/lib/usercopy_32.c unsigned long __copy_user_ll(void *to, const void *from, unsigned long n) from 335 arch/x86/lib/usercopy_32.c if (movsl_is_ok(to, from, n)) from 336 arch/x86/lib/usercopy_32.c __copy_user(to, from, n); from 338 arch/x86/lib/usercopy_32.c n = __copy_user_intel(to, from, n); from 344 arch/x86/lib/usercopy_32.c unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, from 350 arch/x86/lib/usercopy_32.c n = __copy_user_intel_nocache(to, from, n); from 352 arch/x86/lib/usercopy_32.c __copy_user(to, from, n); from 354 arch/x86/lib/usercopy_32.c __copy_user(to, from, n); from 64 arch/x86/lib/usercopy_64.c mcsafe_handle_tail(char *to, char *from, unsigned len) from 66 arch/x86/lib/usercopy_64.c for (; len; --len, to++, from++) { from 71 arch/x86/lib/usercopy_64.c unsigned long rem = __memcpy_mcsafe(to, from, 1); from 205 arch/x86/lib/usercopy_64.c char *from = kmap_atomic(page); from 207 arch/x86/lib/usercopy_64.c memcpy_flushcache(to, from + offset, len); from 208 arch/x86/lib/usercopy_64.c kunmap_atomic(from); from 110 arch/x86/math-emu/fpu_system.h #define FPU_copy_from_user(to, from, n) \ from 111 
arch/x86/math-emu/fpu_system.h do { if (copy_from_user(to, from, n)) FPU_abort; } while (0) from 402 arch/x86/mm/numa.c void __init numa_set_distance(int from, int to, int distance) from 407 arch/x86/mm/numa.c if (from >= numa_distance_cnt || to >= numa_distance_cnt || from 408 arch/x86/mm/numa.c from < 0 || to < 0) { from 410 arch/x86/mm/numa.c from, to, distance); from 415 arch/x86/mm/numa.c (from == to && distance != LOCAL_DISTANCE)) { from 417 arch/x86/mm/numa.c from, to, distance); from 421 arch/x86/mm/numa.c numa_distance[from * numa_distance_cnt + to] = distance; from 424 arch/x86/mm/numa.c int __node_distance(int from, int to) from 426 arch/x86/mm/numa.c if (from >= numa_distance_cnt || to >= numa_distance_cnt) from 427 arch/x86/mm/numa.c return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; from 428 arch/x86/mm/numa.c return numa_distance[from * numa_distance_cnt + to]; from 808 arch/x86/mm/pat.c u64 from = ((u64)pfn) << PAGE_SHIFT; from 809 arch/x86/mm/pat.c u64 to = from + size; from 810 arch/x86/mm/pat.c u64 cursor = from; from 44 arch/x86/um/asm/processor_32.h static inline void arch_copy_thread(struct arch_thread *from, from 47 arch/x86/um/asm/processor_32.h memcpy(&to->tls_array, &from->tls_array, sizeof(from->tls_array)); from 28 arch/x86/um/asm/processor_64.h static inline void arch_copy_thread(struct arch_thread *from, from 31 arch/x86/um/asm/processor_64.h to->fs = from->fs; from 87 arch/x86/um/signal.c struct _fpxreg *from; from 102 arch/x86/um/signal.c from = (struct _fpxreg *) &fxsave->st_space[0]; from 103 arch/x86/um/signal.c for (i = 0; i < 8; i++, to++, from++) { from 105 arch/x86/um/signal.c unsigned long *f = (unsigned long *)from; from 109 arch/x86/um/signal.c __put_user(from->exponent, &to->exponent)) from 120 arch/x86/um/signal.c struct _fpreg __user *from; from 136 arch/x86/um/signal.c from = &buf->_st[0]; from 137 arch/x86/um/signal.c for (i = 0; i < 8; i++, to++, from++) { from 139 arch/x86/um/signal.c unsigned long __user *f = (unsigned long __user *)from; from 143 arch/x86/um/signal.c __get_user(to->exponent, &from->exponent)) from 154 arch/x86/um/signal.c struct sigcontext __user *from) from 162 arch/x86/um/signal.c err = copy_from_user(&sc, from, sizeof(sc)); from 681 arch/x86/xen/setup.c void *from, *to; from 694 arch/x86/xen/setup.c from = early_memremap(src - src_off, src_len + src_off); from 695 arch/x86/xen/setup.c memcpy(to, from, len); from 697 arch/x86/xen/setup.c early_memunmap(from, src_len + src_off); from 133 arch/xtensa/include/asm/page.h extern void copy_page(void *to, void *from); from 142 arch/xtensa/include/asm/page.h extern void copy_page_alias(void *to, void *from, from 148 arch/xtensa/include/asm/page.h void copy_user_highpage(struct page *to, struct page *from, from 152 arch/xtensa/include/asm/page.h # define copy_user_page(to, from, vaddr, pg) copy_page(to, from) from 248 arch/xtensa/include/asm/uaccess.h extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n); from 251 arch/xtensa/include/asm/uaccess.h raw_copy_from_user(void *to, const void __user *from, unsigned long n) from 254 arch/xtensa/include/asm/uaccess.h return __xtensa_copy_user(to, (__force const void *)from, n); from 257 arch/xtensa/include/asm/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) from 259 arch/xtensa/include/asm/uaccess.h prefetch(from); from 260 arch/xtensa/include/asm/uaccess.h return __xtensa_copy_user((__force void *)to, from, n); from 76 block/bfq-cgroup.c struct bfq_stat *from) from 78 
block/bfq-cgroup.c atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt), from 369 block/bfq-cgroup.c static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from) from 371 block/bfq-cgroup.c if (!to || !from) from 376 block/bfq-cgroup.c blkg_rwstat_add_aux(&to->merged, &from->merged); from 377 block/bfq-cgroup.c blkg_rwstat_add_aux(&to->service_time, &from->service_time); from 378 block/bfq-cgroup.c blkg_rwstat_add_aux(&to->wait_time, &from->wait_time); from 379 block/bfq-cgroup.c bfq_stat_add_aux(&from->time, &from->time); from 380 block/bfq-cgroup.c bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); from 382 block/bfq-cgroup.c &from->avg_queue_size_samples); from 383 block/bfq-cgroup.c bfq_stat_add_aux(&to->dequeue, &from->dequeue); from 384 block/bfq-cgroup.c bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time); from 385 block/bfq-cgroup.c bfq_stat_add_aux(&to->idle_time, &from->idle_time); from 386 block/bfq-cgroup.c bfq_stat_add_aux(&to->empty_time, &from->empty_time); from 132 block/bounce.c static void copy_to_high_bio_irq(struct bio *to, struct bio *from) from 145 block/bounce.c fromvec = bio_iter_iovec(from, from_iter); from 158 block/bounce.c bio_advance_iter(from, &from_iter, tovec.bv_len); from 291 block/bounce.c struct bio_vec *to, from; from 298 block/bounce.c bio_for_each_segment(from, *bio_orig, iter) { from 300 block/bounce.c sectors += from.bv_len >> 9; from 301 block/bounce.c if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn) from 36 block/cmdline-parser.c new_subpart->from = (sector_t)memparse(partdef, &partdef); from 38 block/cmdline-parser.c new_subpart->from = (sector_t)(~0ULL); from 231 block/cmdline-parser.c sector_t from = 0; from 236 block/cmdline-parser.c if (subpart->from == (sector_t)(~0ULL)) from 237 block/cmdline-parser.c subpart->from = from; from 239 block/cmdline-parser.c from = subpart->from; from 241 block/cmdline-parser.c if (from >= disk_size) from 244 block/cmdline-parser.c if (subpart->size > (disk_size - from)) from 245 block/cmdline-parser.c subpart->size = disk_size - from; from 247 block/cmdline-parser.c from += subpart->size; from 464 block/partition-generic.c sector_t from, sector_t size) from 490 block/partition-generic.c div_u64_rem(from, zone_sectors, &rem); from 493 block/partition-generic.c if ((from + size) < get_capacity(disk)) { from 501 block/partition-generic.c if (from & (zone_sectors - 1)) from 503 block/partition-generic.c if ((from + size) < get_capacity(disk) && from 575 block/partition-generic.c sector_t size, from; from 581 block/partition-generic.c from = state->parts[p].from; from 582 block/partition-generic.c if (from >= get_capacity(disk)) { from 585 block/partition-generic.c disk->disk_name, p, (unsigned long long) from); from 591 block/partition-generic.c if (from + size > get_capacity(disk)) { from 606 block/partition-generic.c size = get_capacity(disk) - from; from 617 block/partition-generic.c !part_zone_aligned(disk, bdev, from, size)) { from 620 block/partition-generic.c disk->disk_name, p, (unsigned long long) from, from 625 block/partition-generic.c part = add_partition(disk, p, from, size, from 14 block/partitions/check.h sector_t from; from 42 block/partitions/check.h put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size) from 47 block/partitions/check.h p->parts[n].from = from; from 36 block/partitions/cmdline.c put_partition(state, slot, subpart->from >> 9, from 61 block/partitions/cmdline.c static bool has_overlaps(sector_t 
from, sector_t size, from 64 block/partitions/cmdline.c sector_t end = from + size; from 67 block/partitions/cmdline.c if (from >= from2 && from < end2) from 73 block/partitions/cmdline.c if (from2 >= from && from2 < end) from 76 block/partitions/cmdline.c if (end2 > from && end2 <= end) from 96 block/partitions/cmdline.c if (has_overlaps(state->parts[slot].from, from 98 block/partitions/cmdline.c state->parts[i].from, from 107 block/partitions/cmdline.c (u64)state->parts[slot].from << 9, from 110 block/partitions/cmdline.c (u64)state->parts[i].from << 9, from 177 crypto/ecc.c const u64 *from = src; from 180 crypto/ecc.c dest[i] = get_unaligned_be64(&from[ndigits - 1 - i]); from 187 crypto/ecc.c const u64 *from = src; from 190 crypto/ecc.c dest[i] = get_unaligned_le64(&from[i]); from 575 drivers/android/binder.c struct binder_thread *from; from 1784 drivers/android/binder.c BUG_ON(target_thread->transaction_stack->from != target_thread); from 1787 drivers/android/binder.c t->from = NULL; from 1856 drivers/android/binder.c struct binder_thread *from; from 1859 drivers/android/binder.c from = t->from; from 1860 drivers/android/binder.c if (from) from 1861 drivers/android/binder.c atomic_inc(&from->tmp_ref); from 1863 drivers/android/binder.c return from; from 1879 drivers/android/binder.c __acquires(&t->from->proc->inner_lock) from 1881 drivers/android/binder.c struct binder_thread *from; from 1883 drivers/android/binder.c from = binder_get_txn_from(t); from 1884 drivers/android/binder.c if (!from) { from 1885 drivers/android/binder.c __acquire(&from->proc->inner_lock); from 1888 drivers/android/binder.c binder_inner_proc_lock(from->proc); from 1889 drivers/android/binder.c if (t->from) { from 1890 drivers/android/binder.c BUG_ON(from != t->from); from 1891 drivers/android/binder.c return from; from 1893 drivers/android/binder.c binder_inner_proc_unlock(from->proc); from 1894 drivers/android/binder.c __acquire(&from->proc->inner_lock); from 1895 drivers/android/binder.c binder_thread_dec_tmpref(from); from 3037 drivers/android/binder.c struct binder_thread *from; from 3040 drivers/android/binder.c from = tmp->from; from 3041 drivers/android/binder.c if (from && from->proc == target_proc) { from 3042 drivers/android/binder.c atomic_inc(&from->tmp_ref); from 3043 drivers/android/binder.c target_thread = from; from 3100 drivers/android/binder.c t->from = thread; from 3102 drivers/android/binder.c t->from = NULL; from 4756 drivers/android/binder.c } else if (t->from == thread) { from 4757 drivers/android/binder.c t->from = NULL; from 5554 drivers/android/binder.c t->from ? t->from->proc->pid : 0, from 5555 drivers/android/binder.c t->from ? 
t->from->pid : 0, from 5643 drivers/android/binder.c if (t->from == thread) { from 1105 drivers/android/binder_alloc.c const void __user *from, from 1122 drivers/android/binder_alloc.c ret = copy_from_user(kptr, from, size); from 1127 drivers/android/binder_alloc.c from += size; from 159 drivers/android/binder_alloc.h const void __user *from, from 98 drivers/base/regmap/regmap-debugfs.c loff_t from, from 166 drivers/base/regmap/regmap-debugfs.c if (from >= c->min && from <= c->max) { from 167 drivers/base/regmap/regmap-debugfs.c fpos_offset = from - c->min; from 216 drivers/base/regmap/regmap-debugfs.c static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, from 237 drivers/base/regmap/regmap-debugfs.c start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p); from 250 drivers/base/regmap/regmap-debugfs.c map->debugfs_reg_len, i - from); from 228 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int from, from 231 drivers/base/regmap/trace.h TP_ARGS(map, from, to), from 235 drivers/base/regmap/trace.h __field( unsigned int, from ) from 241 drivers/base/regmap/trace.h __entry->from = from; from 245 drivers/base/regmap/trace.h TP_printk("%s %u-%u", __get_str(name), (unsigned int)__entry->from, from 647 drivers/block/ataflop.c static inline void copy_buffer(void *from, void *to) from 649 drivers/block/ataflop.c ulong *p1 = (ulong *)from, *p2 = (ulong *)to; from 523 drivers/block/nbd.c struct iov_iter from; from 531 drivers/block/nbd.c iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request)); from 560 drivers/block/nbd.c iov_iter_advance(&from, sent); from 569 drivers/block/nbd.c request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); from 580 drivers/block/nbd.c result = sock_xmit(nbd, index, 1, &from, from 617 drivers/block/nbd.c iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len); from 619 drivers/block/nbd.c if (skip >= iov_iter_count(&from)) { from 620 drivers/block/nbd.c skip -= iov_iter_count(&from); from 623 drivers/block/nbd.c iov_iter_advance(&from, skip); from 626 drivers/block/nbd.c result = sock_xmit(nbd, index, 1, &from, flags, &sent); from 1152 drivers/block/nbd.c struct iov_iter from; from 1158 drivers/block/nbd.c iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request)); from 1160 drivers/block/nbd.c ret = sock_xmit(nbd, i, 1, &from, 0, NULL); from 431 drivers/block/ps3vram.c static blk_status_t ps3vram_read(struct ps3_system_bus_device *dev, loff_t from, from 438 drivers/block/ps3vram.c (unsigned int)from, len); from 440 drivers/block/ps3vram.c if (from >= priv->size) from 443 drivers/block/ps3vram.c if (len > priv->size - from) from 444 drivers/block/ps3vram.c len = priv->size - from; from 452 drivers/block/ps3vram.c offset = (unsigned int) (from & (priv->cache.page_size - 1)); from 455 drivers/block/ps3vram.c entry = ps3vram_cache_match(dev, from); from 460 drivers/block/ps3vram.c (unsigned int)from, cached, offset, avail, count); from 468 drivers/block/ps3vram.c from += avail; from 894 drivers/block/rbd.c dout("got string token %d val %s\n", token, argstr[0].from); from 155 drivers/bluetooth/hci_vhci.c struct iov_iter *from) from 157 drivers/bluetooth/hci_vhci.c size_t len = iov_iter_count(from); from 169 drivers/bluetooth/hci_vhci.c if (!copy_from_iter_full(skb_put(skb, len), len, from)) { from 279 drivers/bluetooth/hci_vhci.c static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from) from 284 drivers/bluetooth/hci_vhci.c return vhci_get_user(data, from); from 3095 drivers/cdrom/cdrom.c cgc->cmd[2] = (blk.from >> 24) & 0xff; from 
3096 drivers/cdrom/cdrom.c cgc->cmd[3] = (blk.from >> 16) & 0xff; from 3097 drivers/cdrom/cdrom.c cgc->cmd[4] = (blk.from >> 8) & 0xff; from 3098 drivers/cdrom/cdrom.c cgc->cmd[5] = blk.from & 0xff; from 466 drivers/char/applicom.c unsigned char *from = (unsigned char *) &tmpmailbox; from 471 drivers/char/applicom.c writeb(*(from++), to++); from 488 drivers/char/applicom.c void __iomem *from = apbs[IndexCard].RamIO + RAM_TO_PC; from 502 drivers/char/applicom.c *(to++) = readb(from++); from 70 drivers/char/mem.c u64 from = ((u64)pfn) << PAGE_SHIFT; from 71 drivers/char/mem.c u64 to = from + size; from 72 drivers/char/mem.c u64 cursor = from; from 686 drivers/char/mem.c static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from) from 688 drivers/char/mem.c size_t count = iov_iter_count(from); from 689 drivers/char/mem.c iov_iter_advance(from, count); from 1811 drivers/cpufreq/intel_pstate.c int from = cpu->pstate.current_pstate; from 1825 drivers/cpufreq/intel_pstate.c from, from 139 drivers/crypto/bcm/util.c struct scatterlist *from = *from_sg; from 150 drivers/crypto/bcm/util.c for_each_sg(from, sg, from_nents, i) { from 278 drivers/crypto/ccp/ccp-ops.c static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) from 285 drivers/crypto/ccp/ccp-ops.c if (!from) from 297 drivers/crypto/ccp/ccp-ops.c nbytes, from); from 412 drivers/crypto/ccp/ccp-ops.c u32 byte_swap, bool from) from 422 drivers/crypto/ccp/ccp-ops.c if (from) { from 886 drivers/crypto/chelsio/chtls/chtls_io.c struct iov_iter *from, from 893 drivers/crypto/chelsio/chtls/chtls_io.c err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + from 906 drivers/crypto/chelsio/chtls/chtls_io.c static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from) from 908 drivers/crypto/chelsio/chtls/chtls_io.c if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr)) from 551 drivers/dma/ti/edma.c static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to) from 553 drivers/dma/ti/edma.c if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to))) from 556 drivers/dma/ti/edma.c from = EDMA_CHAN_SLOT(from); from 558 drivers/dma/ti/edma.c if (from >= ecc->num_slots || to >= ecc->num_slots) from 561 drivers/dma/ti/edma.c edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000, from 946 drivers/firmware/dmi_scan.c const struct dmi_device *from) from 948 drivers/firmware/dmi_scan.c const struct list_head *head = from ? 
&from->list : &dmi_devices; from 340 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c uint64_t from = src_node_start, to = dst_node_start; from 359 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c &from); from 365 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c from += src_page_offset; from 378 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c r = amdgpu_copy_buffer(ring, from, to, cur_size, from 33 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c static void copy_pps_fields(struct drm_dsc_config *to, const struct drm_dsc_config *from) from 35 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->line_buf_depth = from->line_buf_depth; from 36 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->bits_per_component = from->bits_per_component; from 37 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->convert_rgb = from->convert_rgb; from 38 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->slice_width = from->slice_width; from 39 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->slice_height = from->slice_height; from 40 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->simple_422 = from->simple_422; from 41 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->native_422 = from->native_422; from 42 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->native_420 = from->native_420; from 43 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->pic_width = from->pic_width; from 44 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->pic_height = from->pic_height; from 45 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->rc_tgt_offset_high = from->rc_tgt_offset_high; from 46 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->rc_tgt_offset_low = from->rc_tgt_offset_low; from 47 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->bits_per_pixel = from->bits_per_pixel; from 48 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->rc_edge_factor = from->rc_edge_factor; from 49 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->rc_quant_incr_limit1 = from->rc_quant_incr_limit1; from 50 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->rc_quant_incr_limit0 = from->rc_quant_incr_limit0; from 51 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->initial_xmit_delay = from->initial_xmit_delay; from 52 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->initial_dec_delay = from->initial_dec_delay; from 53 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->block_pred_enable = from->block_pred_enable; from 54 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->first_line_bpg_offset = from->first_line_bpg_offset; from 55 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->second_line_bpg_offset = from->second_line_bpg_offset; from 56 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->initial_offset = from->initial_offset; from 57 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c memcpy(&to->rc_buf_thresh, &from->rc_buf_thresh, sizeof(from->rc_buf_thresh)); from 58 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c memcpy(&to->rc_range_params, &from->rc_range_params, sizeof(from->rc_range_params)); from 59 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->rc_model_size = from->rc_model_size; from 60 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->flatness_min_qp = from->flatness_min_qp; from 61 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->flatness_max_qp = from->flatness_max_qp; from 62 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->initial_scale_value = from->initial_scale_value; from 63 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->scale_decrement_interval = from->scale_decrement_interval; 
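The run of copy_pps_fields() entries above from drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c, which continues just below, copies DSC PPS state from one struct drm_dsc_config to another one named field at a time, falling back to memcpy() only for the array members. What follows is a minimal, self-contained sketch of that copying style only; struct pps_cfg, its members, and the array size are simplified stand-ins for illustration, not the real drm_dsc_config layout or the actual kernel helper.

/*
 * Sketch of the field-by-field copy pattern seen in the copy_pps_fields()
 * entries above.  struct pps_cfg is a stand-in, not struct drm_dsc_config.
 */
#include <string.h>

struct pps_cfg {
	unsigned int line_buf_depth;
	unsigned int bits_per_component;
	unsigned int slice_width;
	unsigned int slice_height;
	unsigned int rc_buf_thresh[14];	/* illustrative size */
};

static void copy_pps_cfg(struct pps_cfg *to, const struct pps_cfg *from)
{
	/* Scalar members are assigned one by one, as in the original. */
	to->line_buf_depth     = from->line_buf_depth;
	to->bits_per_component = from->bits_per_component;
	to->slice_width        = from->slice_width;
	to->slice_height       = from->slice_height;

	/* Array members are transferred with memcpy(), also as above. */
	memcpy(to->rc_buf_thresh, from->rc_buf_thresh,
	       sizeof(to->rc_buf_thresh));
}

Enumerating the fields explicitly, rather than memcpy()ing the whole struct, leaves any destination members that are not listed untouched, which appears to be the point of the original helper. The rc_calc_dpi.c listing resumes below.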
from 64 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->scale_increment_interval = from->scale_increment_interval; from 65 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->nfl_bpg_offset = from->nfl_bpg_offset; from 66 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->nsl_bpg_offset = from->nsl_bpg_offset; from 67 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->slice_bpg_offset = from->slice_bpg_offset; from 68 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->final_offset = from->final_offset; from 69 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->vbr_enable = from->vbr_enable; from 70 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->slice_chunk_size = from->slice_chunk_size; from 71 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->second_line_offset_adj = from->second_line_offset_adj; from 72 drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c to->dsc_version_minor = from->dsc_version_minor; from 1327 drivers/gpu/drm/drm_bufs.c struct drm_buf_entry *from = &dma->bufs[i]; from 1328 drivers/gpu/drm/drm_bufs.c if (from->buf_count) { from 1329 drivers/gpu/drm/drm_bufs.c if (f(data, count, from) < 0) from 1346 drivers/gpu/drm/drm_bufs.c static int copy_one_buf(void *data, int count, struct drm_buf_entry *from) from 1350 drivers/gpu/drm/drm_bufs.c struct drm_buf_desc v = {.count = from->buf_count, from 1351 drivers/gpu/drm/drm_bufs.c .size = from->buf_size, from 1352 drivers/gpu/drm/drm_bufs.c .low_mark = from->low_mark, from 1353 drivers/gpu/drm/drm_bufs.c .high_mark = from->high_mark}; from 193 drivers/gpu/drm/drm_dp_aux_dev.c static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from) from 202 drivers/gpu/drm/drm_dp_aux_dev.c iov_iter_truncate(from, AUX_MAX_OFFSET - pos); from 204 drivers/gpu/drm/drm_dp_aux_dev.c while (iov_iter_count(from)) { from 206 drivers/gpu/drm/drm_dp_aux_dev.c ssize_t todo = min(iov_iter_count(from), sizeof(buf)); from 213 drivers/gpu/drm/drm_dp_aux_dev.c if (!copy_from_iter_full(buf, todo, from)) { from 373 drivers/gpu/drm/drm_ioc32.c static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from) from 377 drivers/gpu/drm/drm_ioc32.c drm_buf_desc32_t v = {.count = from->buf_count, from 378 drivers/gpu/drm/drm_ioc32.c .size = from->buf_size, from 379 drivers/gpu/drm/drm_ioc32.c .low_mark = from->low_mark, from 380 drivers/gpu/drm/drm_ioc32.c .high_mark = from->high_mark}; from 73 drivers/gpu/drm/etnaviv/etnaviv_buffer.c u32 from, u32 to) from 78 drivers/gpu/drm/etnaviv/etnaviv_buffer.c OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to)); from 81 drivers/gpu/drm/etnaviv/etnaviv_buffer.c static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to) from 84 drivers/gpu/drm/etnaviv/etnaviv_buffer.c VIVS_GL_SEMAPHORE_TOKEN_FROM(from) | from 499 drivers/gpu/drm/i915/gt/intel_timeline.c int intel_timeline_read_hwsp(struct i915_request *from, from 503 drivers/gpu/drm/i915/gt/intel_timeline.c struct intel_timeline_cacheline *cl = from->hwsp_cacheline; from 504 drivers/gpu/drm/i915/gt/intel_timeline.c struct intel_timeline *tl = from->timeline; from 510 drivers/gpu/drm/i915/gt/intel_timeline.c err = i915_request_completed(from); from 87 drivers/gpu/drm/i915/gt/intel_timeline.h int intel_timeline_read_hwsp(struct i915_request *from, from 951 drivers/gpu/drm/i915/gt/intel_workarounds.c wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from) from 955 drivers/gpu/drm/i915/gt/intel_workarounds.c name, from, i915_mmio_reg_offset(wa->reg), from 1000 
drivers/gpu/drm/i915/gt/intel_workarounds.c const char *from) from 1009 drivers/gpu/drm/i915/gt/intel_workarounds.c wal->name, from); from 1014 drivers/gpu/drm/i915/gt/intel_workarounds.c bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from) from 1016 drivers/gpu/drm/i915/gt/intel_workarounds.c return wa_list_verify(gt->uncore, >->i915->gt_wa_list, from); from 1502 drivers/gpu/drm/i915/gt/intel_workarounds.c const char *from) from 1545 drivers/gpu/drm/i915/gt/intel_workarounds.c if (!wa_verify(wa, results[i], wal->name, from)) from 1558 drivers/gpu/drm/i915/gt/intel_workarounds.c const char *from) from 1562 drivers/gpu/drm/i915/gt/intel_workarounds.c from); from 30 drivers/gpu/drm/i915/gt/intel_workarounds.h bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from); from 38 drivers/gpu/drm/i915/gt/intel_workarounds.h const char *from); from 1136 drivers/gpu/drm/i915/i915_gem_gtt.c u64 from; from 1146 drivers/gpu/drm/i915/i915_gem_gtt.c from = start; from 1150 drivers/gpu/drm/i915/i915_gem_gtt.c if (unlikely(err && from != start)) from 1152 drivers/gpu/drm/i915/i915_gem_gtt.c from, start, vm->top); from 1692 drivers/gpu/drm/i915/i915_gem_gtt.c u64 from = start; from 1742 drivers/gpu/drm/i915/i915_gem_gtt.c gen6_ppgtt_clear_range(vm, from, start - from); from 833 drivers/gpu/drm/i915/i915_request.c struct i915_request *from, from 840 drivers/gpu/drm/i915/i915_request.c GEM_BUG_ON(!from->timeline->has_initial_breadcrumb); from 844 drivers/gpu/drm/i915/i915_request.c if (already_busywaiting(to) & from->engine->mask) from 846 drivers/gpu/drm/i915/i915_request.c &from->fence, 0, from 849 drivers/gpu/drm/i915/i915_request.c err = i915_request_await_start(to, from); from 854 drivers/gpu/drm/i915/i915_request.c err = __i915_request_await_execution(to, from, NULL, gfp); from 859 drivers/gpu/drm/i915/i915_request.c err = intel_timeline_read_hwsp(from, to, &hwsp_offset); from 879 drivers/gpu/drm/i915/i915_request.c *cs++ = from->fence.seqno; from 884 drivers/gpu/drm/i915/i915_request.c to->sched.semaphores |= from->engine->mask; from 890 drivers/gpu/drm/i915/i915_request.c i915_request_await_request(struct i915_request *to, struct i915_request *from) from 894 drivers/gpu/drm/i915/i915_request.c GEM_BUG_ON(to == from); from 895 drivers/gpu/drm/i915/i915_request.c GEM_BUG_ON(to->timeline == from->timeline); from 897 drivers/gpu/drm/i915/i915_request.c if (i915_request_completed(from)) { from 898 drivers/gpu/drm/i915/i915_request.c i915_sw_fence_set_error_once(&to->submit, from->fence.error); from 903 drivers/gpu/drm/i915/i915_request.c ret = i915_sched_node_add_dependency(&to->sched, &from->sched); from 908 drivers/gpu/drm/i915/i915_request.c if (to->engine == from->engine) { from 910 drivers/gpu/drm/i915/i915_request.c &from->submit, from 914 drivers/gpu/drm/i915/i915_request.c ret = emit_semaphore_wait(to, from, I915_FENCE_GFP); from 917 drivers/gpu/drm/i915/i915_request.c &from->fence, 0, from 925 drivers/gpu/drm/i915/i915_request.c &from->fence, 0, from 72 drivers/gpu/drm/mediatek/mtk_hdmi_regs.h #define CH_SWITCH(from, to) ((from) << ((to) * 3)) from 142 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c int from, mode; from 185 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c from = !!(ram_rd32(fuc, 0x1373f0) & 0x00000002); /*XXX: ok? 
*/ from 213 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c if (mode == 1 && from == 0) { from 246 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c if (from == 0) { from 263 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c if (from == 0) { from 286 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c if (from == 0) { from 131 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c int from; from 165 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c if (ram->from == 2) { from 343 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c if (ram->from == 2 && ram->mode != 2) { from 354 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c if (ram->from != 2 && ram->mode != 2) { from 373 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c if (ram->from != 2 && ram->mode == 2) { from 384 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c if (ram->from == 2 && ram->mode == 2) { from 1053 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c ram->from = ram_rd32(fuc, 0x1373f4) & 0x0000000f; from 124 drivers/gpu/drm/omapdrm/dss/base.c struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from) from 140 drivers/gpu/drm/omapdrm/dss/base.c list = from ? &from->list : &omapdss_devices_list; from 160 drivers/gpu/drm/omapdrm/dss/base.c if (from) from 161 drivers/gpu/drm/omapdrm/dss/base.c omapdss_device_put(from); from 489 drivers/gpu/drm/omapdrm/dss/omapdss.h struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from); from 750 drivers/gpu/drm/ttm/ttm_bo_util.c struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type]; from 791 drivers/gpu/drm/ttm/ttm_bo_util.c } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) { from 798 drivers/gpu/drm/ttm/ttm_bo_util.c spin_lock(&from->move_lock); from 799 drivers/gpu/drm/ttm/ttm_bo_util.c if (!from->move || dma_fence_is_later(fence, from->move)) { from 800 drivers/gpu/drm/ttm/ttm_bo_util.c dma_fence_put(from->move); from 801 drivers/gpu/drm/ttm/ttm_bo_util.c from->move = dma_fence_get(fence); from 803 drivers/gpu/drm/ttm/ttm_bo_util.c spin_unlock(&from->move_lock); from 322 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c const struct vmw_ctx_binding_state *from, from 325 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c size_t offset = (unsigned long)bi - (unsigned long)from; from 445 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c struct vmw_ctx_binding_state *from) from 449 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c list_for_each_entry_safe(entry, next, &from->list, ctx_list) { from 450 drivers/gpu/drm/vmwgfx/vmwgfx_binding.c vmw_binding_transfer(to, from, entry); from 197 drivers/gpu/drm/vmwgfx/vmwgfx_binding.h struct vmw_ctx_binding_state *from); from 61 drivers/hid/hid-apple.c u16 from; from 166 drivers/hid/hid-apple.c const struct apple_key_translation *table, u16 from) from 171 drivers/hid/hid-apple.c for (trans = table; trans->from; trans++) from 172 drivers/hid/hid-apple.c if (trans->from == from) from 204 drivers/hid/hid-apple.c if (test_bit(trans->from, input->key)) from 205 drivers/hid/hid-apple.c code = trans->from; from 226 drivers/hid/hid-apple.c code = do_translate ? 
trans->to : trans->from; from 325 drivers/hid/hid-apple.c for (trans = apple_fn_keys; trans->from; trans++) from 328 drivers/hid/hid-apple.c for (trans = powerbook_fn_keys; trans->from; trans++) from 331 drivers/hid/hid-apple.c for (trans = powerbook_numlock_keys; trans->from; trans++) from 334 drivers/hid/hid-apple.c for (trans = apple_iso_keyboard; trans->from; trans++) from 157 drivers/hid/hid-icade.c static const struct icade_key *icade_find_translation(u16 from) from 159 drivers/hid/hid-icade.c if (from > ICADE_MAX_USAGE) from 161 drivers/hid/hid-icade.c return &icade_usage_table[from]; from 186 drivers/hwmon/adm1026.c #define SCALE(val, from, to) (((val)*(to) + ((from)/2))/(from)) from 124 drivers/hwmon/lm85.c #define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from)) from 267 drivers/hwmon/pmbus/pmbus_core.c int from; from 271 drivers/hwmon/pmbus/pmbus_core.c from = pmbus_read_byte_data(client, page, from 273 drivers/hwmon/pmbus/pmbus_core.c if (from < 0) from 274 drivers/hwmon/pmbus/pmbus_core.c return from; from 276 drivers/hwmon/pmbus/pmbus_core.c to = (from & ~mask) | (config & mask); from 277 drivers/hwmon/pmbus/pmbus_core.c if (to != from) { from 1326 drivers/iio/accel/bmc150-accel-core.c int from) from 1330 drivers/iio/accel/bmc150-accel-core.c for (i = from; i >= 0; i--) { from 53 drivers/iio/adc/dln2-adc.c unsigned int from; from 90 drivers/iio/adc/dln2-adc.c if (p && p->from + p->length == in_loc && from 95 drivers/iio/adc/dln2-adc.c p->from = in_loc; from 504 drivers/iio/adc/dln2-adc.c (void *)dev_data.values + t->from, t->length); from 786 drivers/iio/industrialio-buffer.c unsigned from; from 806 drivers/iio/industrialio-buffer.c if (*p && (*p)->from + (*p)->length == in_loc && from 813 drivers/iio/industrialio-buffer.c (*p)->from = in_loc; from 1378 drivers/iio/industrialio-buffer.c datain + t->from, t->length); from 739 drivers/infiniband/core/uverbs_ioctl.c const void *from, size_t size) from 748 drivers/infiniband/core/uverbs_ioctl.c if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) from 794 drivers/infiniband/core/uverbs_ioctl.c size_t idx, const void *from, size_t size) from 803 drivers/infiniband/core/uverbs_ioctl.c return uverbs_copy_to(bundle, idx, from, size); from 9284 drivers/infiniband/hw/hfi1/chip.c u16 from; from 9294 drivers/infiniband/hw/hfi1/chip.c if (opa_widths & opa_link_xlate[i].from) from 76 drivers/infiniband/hw/hfi1/file_ops.c static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from); from 302 drivers/infiniband/hw/hfi1/file_ops.c static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from) from 308 drivers/infiniband/hw/hfi1/file_ops.c unsigned long dim = from->nr_segs; from 318 drivers/infiniband/hw/hfi1/file_ops.c if (!iter_is_iovec(from) || !dim) { from 335 drivers/infiniband/hw/hfi1/file_ops.c fd, (struct iovec *)(from->iov + done), from 1354 drivers/infiniband/hw/hfi1/hfi.h u64 pbc, const void *from, size_t count); from 328 drivers/infiniband/hw/hfi1/pio.h const void *from, size_t count); from 330 drivers/infiniband/hw/hfi1/pio.h const void *from, size_t nbytes); from 331 drivers/infiniband/hw/hfi1/pio.h void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes); from 72 drivers/infiniband/hw/hfi1/pio_copy.c const void *from, size_t count) from 92 drivers/infiniband/hw/hfi1/pio_copy.c writeq(*(u64 *)from, dest); from 93 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 108 drivers/infiniband/hw/hfi1/pio_copy.c writeq(*(u64 *)from, dest); from 109 
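
The hid-apple and hid-icade lines above show two ways of translating a key code named from: hid-apple scans a table whose final entry has a zero from field, while hid-icade bounds-checks an index into a fixed array. A minimal stand-alone sketch of the sentinel-terminated scan, with a made-up table rather than the drivers' real keymaps:

#include <stdio.h>

struct key_translation {
	unsigned short from;
	unsigned short to;
};

/* hypothetical example table; a zeroed entry terminates the scan */
static const struct key_translation fn_keys[] = {
	{ 0x3a, 0xa0 },
	{ 0x3b, 0xa1 },
	{ 0, 0 },
};

static const struct key_translation *
find_translation(const struct key_translation *table, unsigned short from)
{
	const struct key_translation *trans;

	for (trans = table; trans->from; trans++)
		if (trans->from == from)
			return trans;
	return NULL;
}

int main(void)
{
	const struct key_translation *t = find_translation(fn_keys, 0x3b);

	/* fall back to the original code when no translation exists */
	printf("0x3b -> %#x\n", (unsigned)(t ? t->to : 0x3b));
	return 0;
}

The zero sentinel keeps the table self-describing, so callers never pass a separate length.
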
drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 127 drivers/infiniband/hw/hfi1/pio_copy.c writeq(*(u64 *)from, dest); from 128 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 138 drivers/infiniband/hw/hfi1/pio_copy.c writeq(*(u64 *)from, dest); from 139 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 150 drivers/infiniband/hw/hfi1/pio_copy.c val.val32[0] = *(u32 *)from; from 225 drivers/infiniband/hw/hfi1/pio_copy.c static inline void read_low_bytes(struct pio_buf *pbuf, const void *from, from 229 drivers/infiniband/hw/hfi1/pio_copy.c jcopy(&pbuf->carry.val8[0], from, nbytes); from 242 drivers/infiniband/hw/hfi1/pio_copy.c const void *from, unsigned int nbytes) from 244 drivers/infiniband/hw/hfi1/pio_copy.c jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes); from 305 drivers/infiniband/hw/hfi1/pio_copy.c const void *from, size_t nbytes) from 324 drivers/infiniband/hw/hfi1/pio_copy.c writeq(*(u64 *)from, dest); from 325 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 340 drivers/infiniband/hw/hfi1/pio_copy.c writeq(*(u64 *)from, dest); from 341 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 359 drivers/infiniband/hw/hfi1/pio_copy.c writeq(*(u64 *)from, dest); from 360 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 370 drivers/infiniband/hw/hfi1/pio_copy.c writeq(*(u64 *)from, dest); from 371 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 380 drivers/infiniband/hw/hfi1/pio_copy.c read_low_bytes(pbuf, from, nbytes & 0x7); from 397 drivers/infiniband/hw/hfi1/pio_copy.c static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes) from 429 drivers/infiniband/hw/hfi1/pio_copy.c merge_write8(pbuf, dest, from); from 430 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 456 drivers/infiniband/hw/hfi1/pio_copy.c merge_write8(pbuf, dest, from); from 457 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 467 drivers/infiniband/hw/hfi1/pio_copy.c merge_write8(pbuf, dest, from); from 468 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 480 drivers/infiniband/hw/hfi1/pio_copy.c read_extra_bytes(pbuf, from, nread); from 506 drivers/infiniband/hw/hfi1/pio_copy.c from += nread; /* from is now not aligned */ from 507 drivers/infiniband/hw/hfi1/pio_copy.c read_low_bytes(pbuf, from, bytes_left); from 510 drivers/infiniband/hw/hfi1/pio_copy.c read_extra_bytes(pbuf, from, bytes_left); from 525 drivers/infiniband/hw/hfi1/pio_copy.c const void *from, size_t nbytes) from 555 drivers/infiniband/hw/hfi1/pio_copy.c writeq(*(u64 *)from, dest); from 556 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 582 drivers/infiniband/hw/hfi1/pio_copy.c writeq(*(u64 *)from, dest); from 583 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 593 drivers/infiniband/hw/hfi1/pio_copy.c writeq(*(u64 *)from, dest); from 594 drivers/infiniband/hw/hfi1/pio_copy.c from += sizeof(u64); from 599 drivers/infiniband/hw/hfi1/pio_copy.c read_low_bytes(pbuf, from, nbytes & 0x7); from 613 drivers/infiniband/hw/hfi1/pio_copy.c void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes) from 615 drivers/infiniband/hw/hfi1/pio_copy.c unsigned long from_align = (unsigned long)from & 0x7; from 619 drivers/infiniband/hw/hfi1/pio_copy.c read_extra_bytes(pbuf, from, nbytes); from 638 drivers/infiniband/hw/hfi1/pio_copy.c read_extra_bytes(pbuf, from, to_align); from 639 drivers/infiniband/hw/hfi1/pio_copy.c from += to_align; from 649 
drivers/infiniband/hw/hfi1/pio_copy.c read_extra_bytes(pbuf, from, to_fill); from 650 drivers/infiniband/hw/hfi1/pio_copy.c from += to_fill; from 679 drivers/infiniband/hw/hfi1/pio_copy.c read_low_bytes(pbuf, from, extra); from 680 drivers/infiniband/hw/hfi1/pio_copy.c from += extra; from 697 drivers/infiniband/hw/hfi1/pio_copy.c mid_copy_mix(pbuf, from, nbytes); from 699 drivers/infiniband/hw/hfi1/pio_copy.c mid_copy_straight(pbuf, from, nbytes); from 1635 drivers/infiniband/hw/mlx5/odp.c u32 from) from 1642 drivers/infiniband/hw/mlx5/odp.c for (i = from; i < num_sge; ++i) { from 1145 drivers/infiniband/hw/qib/qib.h void qib_pio_copy(void __iomem *to, const void *from, size_t count); from 2242 drivers/infiniband/hw/qib/qib_file_ops.c static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from) from 2248 drivers/infiniband/hw/qib/qib_file_ops.c if (!iter_is_iovec(from) || !from->nr_segs || !pq) from 2251 drivers/infiniband/hw/qib/qib_file_ops.c return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs); from 45 drivers/infiniband/hw/qib/qib_pio_copy.c void qib_pio_copy(void __iomem *to, const void *from, size_t count) from 49 drivers/infiniband/hw/qib/qib_pio_copy.c const u64 *src = from; from 58 drivers/infiniband/hw/qib/qib_pio_copy.c const u32 *src = from; from 461 drivers/input/keyboard/applespi.c u16 from; from 1055 drivers/input/keyboard/applespi.c for (trans = table; trans->from; trans++) from 1056 drivers/input/keyboard/applespi.c if (trans->from == key) from 706 drivers/iommu/iova.c copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) from 711 drivers/iommu/iova.c spin_lock_irqsave(&from->iova_rbtree_lock, flags); from 712 drivers/iommu/iova.c for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { from 724 drivers/iommu/iova.c spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); from 2765 drivers/irqchip/irq-gic-v3-its.c static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) from 2773 drivers/irqchip/irq-gic-v3-its.c rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; from 2806 drivers/irqchip/irq-gic-v3-its.c int from = vpe->col_idx; from 2810 drivers/irqchip/irq-gic-v3-its.c its_vpe_db_proxy_move(vpe, from, cpu); from 2386 drivers/md/bcache/btree.c struct bkey *from, from 2395 drivers/md/bcache/btree.c bch_btree_iter_init(&b->keys, &iter, from); from 2400 drivers/md/bcache/btree.c op, from, fn, flags); from 2401 drivers/md/bcache/btree.c from = NULL; from 2415 drivers/md/bcache/btree.c struct bkey *from, btree_map_nodes_fn *fn, int flags) from 2417 drivers/md/bcache/btree.c return btree_root(map_nodes_recurse, c, op, from, fn, flags); from 2421 drivers/md/bcache/btree.c struct bkey *from, btree_map_keys_fn *fn, from 2428 drivers/md/bcache/btree.c bch_btree_iter_init(&b->keys, &iter, from); from 2433 drivers/md/bcache/btree.c : btree(map_keys_recurse, k, b, op, from, fn, flags); from 2434 drivers/md/bcache/btree.c from = NULL; from 2448 drivers/md/bcache/btree.c struct bkey *from, btree_map_keys_fn *fn, int flags) from 2450 drivers/md/bcache/btree.c return btree_root(map_keys_recurse, c, op, from, fn, flags); from 299 drivers/md/bcache/btree.h struct bkey *from, btree_map_nodes_fn *fn, int flags); from 302 drivers/md/bcache/btree.h struct bkey *from, btree_map_nodes_fn *fn) from 304 drivers/md/bcache/btree.h return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES); from 309 drivers/md/bcache/btree.h struct bkey *from, from 312 drivers/md/bcache/btree.h return __bch_btree_map_nodes(op, c, from, fn, 
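
The hfi1 pio_copy.c hits repeat one pattern: full 64-bit words are pushed with writeq(*(u64 *)from, dest) while at least 8 bytes remain, and the final 1..7 bytes are parked in a carry buffer by read_low_bytes(). A simplified userspace model of that split, using ordinary stores instead of MMIO and hypothetical names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of an 8-byte-granular copy with a carry for the short tail.
 * In the driver the 64-bit stores are writeq() into chip send buffers;
 * here they are plain stores so the sketch can run anywhere. */
static size_t copy_qwords_with_carry(uint64_t *dest, const void *from,
				     size_t nbytes, uint64_t *carry)
{
	size_t done = 0;

	while (nbytes - done >= sizeof(uint64_t)) {
		uint64_t qw;

		memcpy(&qw, (const char *)from + done, sizeof(qw));
		*dest++ = qw;			/* writeq() analogue */
		done += sizeof(qw);
	}

	*carry = 0;				/* read_low_bytes() analogue */
	memcpy(carry, (const char *)from + done, nbytes & 0x7);
	return done;
}

int main(void)
{
	uint64_t out[4], carry;
	const char msg[] = "0123456789abcdefXYZ";	/* 19 bytes: 2 qwords + 3 carry bytes */
	size_t n = copy_qwords_with_carry(out, msg, sizeof(msg) - 1, &carry);

	printf("copied %zu bytes, carry holds %zu bytes\n", n, (sizeof(msg) - 1) & 0x7);
	return 0;
}
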
MAP_LEAF_NODES); from 318 drivers/md/bcache/btree.h struct bkey *from, btree_map_keys_fn *fn, int flags); from 798 drivers/md/dm-clone-target.c struct dm_io_region from, to; from 824 drivers/md/dm-clone-target.c from.bdev = clone->source_dev->bdev; from 825 drivers/md/dm-clone-target.c from.sector = region_to_sector(clone, region_start); from 826 drivers/md/dm-clone-target.c from.count = total_size; from 829 drivers/md/dm-clone-target.c to.sector = from.sector; from 830 drivers/md/dm-clone-target.c to.count = from.count; from 834 drivers/md/dm-clone-target.c dm_kcopyd_copy(clone->kcopyd_client, &from, 1, &to, 0, from 774 drivers/md/dm-kcopyd.c void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, from 820 drivers/md/dm-kcopyd.c if (from) { from 821 drivers/md/dm-kcopyd.c job->source = *from; from 332 drivers/md/dm-raid1.c struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest; from 340 drivers/md/dm-raid1.c from.bdev = m->dev->bdev; from 341 drivers/md/dm-raid1.c from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key); from 347 drivers/md/dm-raid1.c from.count = ms->ti->len & (region_size - 1); from 348 drivers/md/dm-raid1.c if (!from.count) from 349 drivers/md/dm-raid1.c from.count = region_size; from 351 drivers/md/dm-raid1.c from.count = region_size; from 361 drivers/md/dm-raid1.c dest->count = from.count; from 369 drivers/md/dm-raid1.c dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, from 1354 drivers/md/dm-thin.c struct dm_io_region from, to; from 1356 drivers/md/dm-thin.c from.bdev = origin->bdev; from 1357 drivers/md/dm-thin.c from.sector = data_origin * pool->sectors_per_block; from 1358 drivers/md/dm-thin.c from.count = len; from 1364 drivers/md/dm-thin.c dm_kcopyd_copy(pool->copier, &from, 1, &to, from 1561 drivers/md/dm-writecache.c struct dm_io_region from, to; from 1573 drivers/md/dm-writecache.c from.bdev = wc->ssd_dev->bdev; from 1574 drivers/md/dm-writecache.c from.sector = cache_sector(wc, e); from 1575 drivers/md/dm-writecache.c from.count = n_sectors; from 1593 drivers/md/dm-writecache.c dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c); from 90 drivers/media/firewire/firedtv-avc.c static inline void clear_operands(struct avc_command_frame *c, int from, int to) from 92 drivers/media/firewire/firedtv-avc.c memset(&c->operand[from], 0, to - from + 1); from 95 drivers/media/firewire/firedtv-avc.c static void pad_operands(struct avc_command_frame *c, int from) from 97 drivers/media/firewire/firedtv-avc.c int to = ALIGN(from, 4); from 99 drivers/media/firewire/firedtv-avc.c if (from <= to && to <= LAST_OPERAND) from 100 drivers/media/firewire/firedtv-avc.c clear_operands(c, from, to); from 47 drivers/media/pci/cobalt/cobalt-flash.c unsigned long from, ssize_t len) from 49 drivers/media/pci/cobalt/cobalt-flash.c u32 src = from; from 65 drivers/media/pci/cobalt/cobalt-flash.c const void *from, ssize_t len) from 67 drivers/media/pci/cobalt/cobalt-flash.c const u8 *src = from; from 128 drivers/media/pci/cx18/cx18-io.h const void __iomem *from, unsigned int len) from 130 drivers/media/pci/cx18/cx18-io.h memcpy_fromio(to, from, len); from 76 drivers/media/pci/ivtv/ivtv-queue.c static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from, from 79 drivers/media/pci/ivtv/ivtv-queue.c struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list); from 81 drivers/media/pci/ivtv/ivtv-queue.c list_move_tail(from->list.next, &to->list); from 82 drivers/media/pci/ivtv/ivtv-queue.c 
from->buffers--; from 83 drivers/media/pci/ivtv/ivtv-queue.c from->length -= s->buf_size; from 84 drivers/media/pci/ivtv/ivtv-queue.c from->bytesused -= buf->bytesused - buf->readpos; from 110 drivers/media/pci/ivtv/ivtv-queue.c int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal, from 115 drivers/media/pci/ivtv/ivtv-queue.c int from_free = from == &s->q_free; from 122 drivers/media/pci/ivtv/ivtv-queue.c needed_bytes = from->length; from 125 drivers/media/pci/ivtv/ivtv-queue.c bytes_available = from_free ? from->length : from->bytesused; from 141 drivers/media/pci/ivtv/ivtv-queue.c list_move_tail(steal->list.prev, &from->list); from 147 drivers/media/pci/ivtv/ivtv-queue.c from->buffers++; from 148 drivers/media/pci/ivtv/ivtv-queue.c from->length += s->buf_size; from 159 drivers/media/pci/ivtv/ivtv-queue.c ivtv_queue_move_buf(s, from, to, 1); from 166 drivers/media/pci/ivtv/ivtv-queue.c ivtv_queue_move_buf(s, from, to, to_free); from 62 drivers/media/pci/ivtv/ivtv-queue.h int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal, from 890 drivers/media/platform/omap3isp/isppreview.c void __user *from = *(void __user **) from 895 drivers/media/platform/omap3isp/isppreview.c if (to && from && size) { from 896 drivers/media/platform/omap3isp/isppreview.c if (copy_from_user(to, from, size)) { from 595 drivers/media/platform/sti/bdisp/bdisp-hw.c static int bdisp_hw_get_inc(u32 from, u32 to, u16 *inc) from 602 drivers/media/platform/sti/bdisp/bdisp-hw.c if (to == from) { from 607 drivers/media/platform/sti/bdisp/bdisp-hw.c tmp = (from << 10) / to; from 438 drivers/media/rc/rc-main.c const struct rc_map *from) from 444 drivers/media/rc/rc-main.c rc = ir_create_table(dev, rc_map, from->name, from->rc_proto, from 445 drivers/media/rc/rc-main.c from->size); from 449 drivers/media/rc/rc-main.c for (i = 0; i < from->size; i++) { from 451 drivers/media/rc/rc-main.c from->scan[i].scancode, false); from 458 drivers/media/rc/rc-main.c from->scan[i].keycode); from 243 drivers/media/tuners/mt20xx.c unsigned int from, unsigned int to) from 250 drivers/media/tuners/mt20xx.c rfin,if1,if2,from,to); from 257 drivers/media/tuners/mt20xx.c ret=mt2032_compute_freq(fe,rfin,if1,if2,from,to,&buf[1],&sel,priv->xogc); from 302 drivers/media/tuners/mt20xx.c int if2,from,to; from 307 drivers/media/tuners/mt20xx.c from = 40750*1000; from 312 drivers/media/tuners/mt20xx.c from = 32900*1000; from 318 drivers/media/tuners/mt20xx.c 1090*1000*1000, if2, from, to); from 684 drivers/media/usb/cx231xx/cx231xx-video.c void cx231xx_swab(u16 *from, u16 *to, u16 len) from 692 drivers/media/usb/cx231xx/cx231xx-video.c to[i] = (from[i] << 8) | (from[i] >> 8); from 827 drivers/media/usb/cx231xx/cx231xx.h void cx231xx_swab(u16 *from, u16 *to, u16 len); from 38 drivers/media/v4l2-core/v4l2-compat-ioctl32.c #define assign_in_user(to, from) \ from 40 drivers/media/v4l2-core/v4l2-compat-ioctl32.c typeof(*from) __assign_tmp; \ from 42 drivers/media/v4l2-core/v4l2-compat-ioctl32.c get_user(__assign_tmp, from) || put_user(__assign_tmp, to); \ from 95 drivers/media/v4l2-core/v4l2-compat-ioctl32.c #define assign_in_user_cast(to, from) \ from 97 drivers/media/v4l2-core/v4l2-compat-ioctl32.c typeof(*from) __assign_tmp; \ from 99 drivers/media/v4l2-core/v4l2-compat-ioctl32.c get_user_cast(__assign_tmp, from) || put_user(__assign_tmp, to);\ from 1954 drivers/media/v4l2-core/v4l2-ctrls.c union v4l2_ctrl_ptr from, union v4l2_ctrl_ptr to) from 1958 
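
cx231xx_swab() above converts a buffer of 16-bit words between endiannesses with to[i] = (from[i] << 8) | (from[i] >> 8). The same operation as a tiny runnable routine:

#include <stdint.h>
#include <stdio.h>

static void swab16_buf(const uint16_t *from, uint16_t *to, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		to[i] = (uint16_t)((from[i] << 8) | (from[i] >> 8));
}

int main(void)
{
	uint16_t in[2] = { 0x1234, 0xabcd }, out[2];

	swab16_buf(in, out, 2);
	printf("%#06x %#06x\n", out[0], out[1]);	/* 0x3412 0xcdab */
	return 0;
}
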
drivers/media/v4l2-core/v4l2-ctrls.c memcpy(to.p, from.p, ctrl->elems * ctrl->elem_size); from 3107 drivers/media/v4l2-core/v4l2-ctrls.c const struct v4l2_ctrl_handler *from) from 3112 drivers/media/v4l2-core/v4l2-ctrls.c if (WARN_ON(!hdl || hdl == from)) from 3120 drivers/media/v4l2-core/v4l2-ctrls.c mutex_lock(from->lock); from 3121 drivers/media/v4l2-core/v4l2-ctrls.c list_for_each_entry(ref, &from->ctrl_refs, node) { from 3135 drivers/media/v4l2-core/v4l2-ctrls.c mutex_unlock(from->lock); from 3239 drivers/media/v4l2-core/v4l2-ctrls.c struct v4l2_ctrl_handler *from) from 3243 drivers/media/v4l2-core/v4l2-ctrls.c ret = v4l2_ctrl_request_clone(hdl, from); from 3247 drivers/media/v4l2-core/v4l2-ctrls.c from, false, &hdl->req_obj); from 3249 drivers/media/v4l2-core/v4l2-ctrls.c list_add_tail(&hdl->requests, &from->requests); from 139 drivers/media/v4l2-core/v4l2-dev.c static inline int devnode_find(struct video_device *vdev, int from, int to) from 141 drivers/media/v4l2-core/v4l2-dev.c return find_next_zero_bit(devnode_bits(vdev->vfl_type), to, from); from 234 drivers/misc/cxl/flash.c void *dest, *from; from 279 drivers/misc/cxl/flash.c from = (void *) ai->data; from 294 drivers/misc/cxl/flash.c if (copy_from_user(dest, from, s_copy)) from 302 drivers/misc/cxl/flash.c from += s_copy; from 331 drivers/misc/vmw_vmci/vmci_queue_pair.c struct iov_iter *from, from 358 drivers/misc/vmw_vmci/vmci_queue_pair.c from)) { from 2529 drivers/misc/vmw_vmci/vmci_queue_pair.c struct iov_iter *from) from 2533 drivers/misc/vmw_vmci/vmci_queue_pair.c size_t buf_size = iov_iter_count(from); from 2553 drivers/misc/vmw_vmci/vmci_queue_pair.c result = qp_memcpy_to_queue_iter(produce_q, tail, from, written); from 2559 drivers/misc/vmw_vmci/vmci_queue_pair.c result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp); from 2561 drivers/misc/vmw_vmci/vmci_queue_pair.c result = qp_memcpy_to_queue_iter(produce_q, 0, from, from 3016 drivers/misc/vmw_vmci/vmci_queue_pair.c struct iov_iter from; from 3022 drivers/misc/vmw_vmci/vmci_queue_pair.c iov_iter_kvec(&from, WRITE, &v, 1, buf_size); from 3030 drivers/misc/vmw_vmci/vmci_queue_pair.c &from); from 1090 drivers/mmc/core/block.c unsigned int from, nr; from 1099 drivers/mmc/core/block.c from = blk_rq_pos(req); from 1113 drivers/mmc/core/block.c err = mmc_erase(card, from, nr, card->erase_arg); from 1128 drivers/mmc/core/block.c unsigned int from, nr, arg; from 1137 drivers/mmc/core/block.c from = blk_rq_pos(req); from 1140 drivers/mmc/core/block.c if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) from 1157 drivers/mmc/core/block.c err = mmc_erase(card, from, nr, arg); from 1175 drivers/mmc/core/block.c err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); from 1655 drivers/mmc/core/core.c static int mmc_do_erase(struct mmc_card *card, unsigned int from, from 1685 drivers/mmc/core/core.c (from >> card->erase_shift)) + 1; from 1687 drivers/mmc/core/core.c qty += to - from + 1; from 1690 drivers/mmc/core/core.c (from / card->erase_size)) + 1; from 1693 drivers/mmc/core/core.c from <<= 9; from 1701 drivers/mmc/core/core.c cmd.arg = from; from 1805 drivers/mmc/core/core.c unsigned int *from, from 1809 drivers/mmc/core/core.c unsigned int from_new = *from, nr_new = nr, rem; from 1847 drivers/mmc/core/core.c *from = from_new; from 1861 drivers/mmc/core/core.c int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, from 1864 drivers/mmc/core/core.c unsigned int rem, to = from + nr; from 1886 drivers/mmc/core/core.c if (from % 
card->erase_size || nr % card->erase_size) from 1891 drivers/mmc/core/core.c nr = mmc_align_erase_size(card, &from, &to, nr); from 1896 drivers/mmc/core/core.c if (to <= from) from 1910 drivers/mmc/core/core.c rem = card->erase_size - (from % card->erase_size); from 1912 drivers/mmc/core/core.c err = mmc_do_erase(card, from, from + rem - 1, arg); from 1913 drivers/mmc/core/core.c from += rem; from 1914 drivers/mmc/core/core.c if ((err) || (to <= from)) from 1918 drivers/mmc/core/core.c return mmc_do_erase(card, from, to, arg); from 1971 drivers/mmc/core/core.c int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, from 1976 drivers/mmc/core/core.c if (from % card->erase_size || nr % card->erase_size) from 109 drivers/mmc/core/core.h int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, from 116 drivers/mmc/core/core.h int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, from 301 drivers/mmc/host/sdhci-acpi.c struct pci_dev *dev, *parent, *from = NULL; from 304 drivers/mmc/host/sdhci-acpi.c dev = pci_get_device(vendor, device, from); from 305 drivers/mmc/host/sdhci-acpi.c pci_dev_put(from); from 315 drivers/mmc/host/sdhci-acpi.c from = dev; from 93 drivers/mtd/chips/cfi_cmdset_0001.c static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, from 95 drivers/mtd/chips/cfi_cmdset_0001.c static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len); from 1255 drivers/mtd/chips/cfi_cmdset_0001.c #define XIP_INVAL_CACHED_RANGE(map, from, size) \ from 1256 drivers/mtd/chips/cfi_cmdset_0001.c INVALIDATE_CACHED_RANGE(map, from, size) from 1379 drivers/mtd/chips/cfi_cmdset_0001.c static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len, from 1394 drivers/mtd/chips/cfi_cmdset_0001.c chipnum = (from >> cfi->chipshift); from 1395 drivers/mtd/chips/cfi_cmdset_0001.c ofs = from - (chipnum << cfi->chipshift); from 1432 drivers/mtd/chips/cfi_cmdset_0001.c static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) from 1442 drivers/mtd/chips/cfi_cmdset_0001.c chipnum = (from >> cfi->chipshift); from 1443 drivers/mtd/chips/cfi_cmdset_0001.c ofs = from - (chipnum << cfi->chipshift); from 1511 drivers/mtd/chips/cfi_cmdset_0001.c static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) from 1520 drivers/mtd/chips/cfi_cmdset_0001.c chipnum = (from >> cfi->chipshift); from 1521 drivers/mtd/chips/cfi_cmdset_0001.c ofs = from - (chipnum << cfi->chipshift); from 2295 drivers/mtd/chips/cfi_cmdset_0001.c static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, from 2382 drivers/mtd/chips/cfi_cmdset_0001.c otpinfo->start = from; from 2387 drivers/mtd/chips/cfi_cmdset_0001.c from += groupsize; from 2390 drivers/mtd/chips/cfi_cmdset_0001.c } else if (from >= groupsize) { from 2391 drivers/mtd/chips/cfi_cmdset_0001.c from -= groupsize; from 2395 drivers/mtd/chips/cfi_cmdset_0001.c data_offset += from; from 2396 drivers/mtd/chips/cfi_cmdset_0001.c size -= from; from 2397 drivers/mtd/chips/cfi_cmdset_0001.c from = 0; from 2429 drivers/mtd/chips/cfi_cmdset_0001.c static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, from 2433 drivers/mtd/chips/cfi_cmdset_0001.c return cfi_intelext_otp_walk(mtd, from, len, retlen, from 2437 drivers/mtd/chips/cfi_cmdset_0001.c static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from, from 2441 drivers/mtd/chips/cfi_cmdset_0001.c return 
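
The mmc core hits show how an erase that does not start on an erase-group boundary is handled: rem = card->erase_size - (from % card->erase_size) is the distance to the next boundary, the head range [from, from + rem - 1] is erased on its own, and the remainder follows. A small arithmetic sketch of that split; the sector sizes are made up, and the real mmc_erase() has further argument-dependent cases:

#include <stdio.h>

int main(void)
{
	unsigned int erase_size = 1024;		/* sectors per erase group, hypothetical */
	unsigned int from = 1500, nr = 4000;
	unsigned int to = from + nr - 1;	/* inclusive end */

	if (from % erase_size) {
		unsigned int rem = erase_size - (from % erase_size);

		printf("head erase: %u..%u\n", from, from + rem - 1);
		from += rem;
	}
	if (from <= to)
		printf("main erase: %u..%u\n", from, to);
	return 0;
}
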
cfi_intelext_otp_walk(mtd, from, len, retlen, from 2445 drivers/mtd/chips/cfi_cmdset_0001.c static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from, from 2449 drivers/mtd/chips/cfi_cmdset_0001.c return cfi_intelext_otp_walk(mtd, from, len, retlen, from 2454 drivers/mtd/chips/cfi_cmdset_0001.c loff_t from, size_t len) from 2457 drivers/mtd/chips/cfi_cmdset_0001.c return cfi_intelext_otp_walk(mtd, from, len, &retlen, from 1162 drivers/mtd/chips/cfi_cmdset_0002.c #define XIP_INVAL_CACHED_RANGE(map, from, size) \ from 1163 drivers/mtd/chips/cfi_cmdset_0002.c INVALIDATE_CACHED_RANGE(map, from, size) from 1240 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) from 1249 drivers/mtd/chips/cfi_cmdset_0002.c chipnum = (from >> cfi->chipshift); from 1250 drivers/mtd/chips/cfi_cmdset_0002.c ofs = from - (chipnum << cfi->chipshift); from 1348 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) from 1358 drivers/mtd/chips/cfi_cmdset_0002.c chipnum=from>>3; from 1359 drivers/mtd/chips/cfi_cmdset_0002.c ofs=from & 7; from 1485 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, from 1578 drivers/mtd/chips/cfi_cmdset_0002.c otpinfo->start = from; from 1583 drivers/mtd/chips/cfi_cmdset_0002.c from += otpsize; from 1584 drivers/mtd/chips/cfi_cmdset_0002.c } else if ((from < otpsize) && (len > 0)) { from 1586 drivers/mtd/chips/cfi_cmdset_0002.c size = (len < otpsize - from) ? len : otpsize - from; from 1587 drivers/mtd/chips/cfi_cmdset_0002.c ret = action(map, chip, otpoffset + from, size, buf, from 1595 drivers/mtd/chips/cfi_cmdset_0002.c from = 0; from 1597 drivers/mtd/chips/cfi_cmdset_0002.c from -= otpsize; from 1617 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, from 1621 drivers/mtd/chips/cfi_cmdset_0002.c return cfi_amdstd_otp_walk(mtd, from, len, retlen, from 1625 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, from 1629 drivers/mtd/chips/cfi_cmdset_0002.c return cfi_amdstd_otp_walk(mtd, from, len, retlen, from 1633 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from, from 1637 drivers/mtd/chips/cfi_cmdset_0002.c return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf, from 1641 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, from 1645 drivers/mtd/chips/cfi_cmdset_0002.c return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL, from 384 drivers/mtd/chips/cfi_cmdset_0020.c static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) from 393 drivers/mtd/chips/cfi_cmdset_0020.c chipnum = (from >> cfi->chipshift); from 394 drivers/mtd/chips/cfi_cmdset_0020.c ofs = from - (chipnum << cfi->chipshift); from 72 drivers/mtd/chips/map_absent.c static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) from 23 drivers/mtd/chips/map_ram.c static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len, from 25 drivers/mtd/chips/map_ram.c static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len); from 86 drivers/mtd/chips/map_ram.c static int mapram_point(struct mtd_info *mtd, loff_t from, size_t 
len, from 93 drivers/mtd/chips/map_ram.c *virt = map->virt + from; from 95 drivers/mtd/chips/map_ram.c *phys = map->phys + from; from 100 drivers/mtd/chips/map_ram.c static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) from 105 drivers/mtd/chips/map_ram.c static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) from 109 drivers/mtd/chips/map_ram.c map_copy_from(map, buf, from, len); from 23 drivers/mtd/chips/map_rom.c static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len, from 25 drivers/mtd/chips/map_rom.c static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len); from 72 drivers/mtd/chips/map_rom.c static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len, from 79 drivers/mtd/chips/map_rom.c *virt = map->virt + from; from 81 drivers/mtd/chips/map_rom.c *phys = map->phys + from; from 86 drivers/mtd/chips/map_rom.c static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len) from 91 drivers/mtd/chips/map_rom.c static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) from 95 drivers/mtd/chips/map_rom.c map_copy_from(map, buf, from, len); from 95 drivers/mtd/devices/bcm47xxsflash.c static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len, from 102 drivers/mtd/devices/bcm47xxsflash.c if ((from + len) > mtd->size) from 106 drivers/mtd/devices/bcm47xxsflash.c if (from < BCM47XXSFLASH_WINDOW_SZ) { from 109 drivers/mtd/devices/bcm47xxsflash.c memcpy_len = min(len, (size_t)(BCM47XXSFLASH_WINDOW_SZ - from)); from 110 drivers/mtd/devices/bcm47xxsflash.c memcpy_fromio(buf, b47s->window + from, memcpy_len); from 111 drivers/mtd/devices/bcm47xxsflash.c from += memcpy_len; from 118 drivers/mtd/devices/bcm47xxsflash.c b47s->cc_write(b47s, BCMA_CC_FLASHADDR, from++); from 87 drivers/mtd/devices/block2mtd.c size_t from = instr->addr; from 92 drivers/mtd/devices/block2mtd.c err = _block2mtd_erase(dev, from, len); from 101 drivers/mtd/devices/block2mtd.c static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len, from 106 drivers/mtd/devices/block2mtd.c int index = from >> PAGE_SHIFT; from 107 drivers/mtd/devices/block2mtd.c int offset = from & (PAGE_SIZE-1); from 832 drivers/mtd/devices/docg3.c static void calc_block_sector(loff_t from, int *block0, int *block1, int *page, from 841 drivers/mtd/devices/docg3.c sector = from / DOC_LAYOUT_PAGE_SIZE; from 865 drivers/mtd/devices/docg3.c static int doc_read_oob(struct mtd_info *mtd, loff_t from, from 889 drivers/mtd/devices/docg3.c from, ops->mode, buf, len, oobbuf, ooblen); from 896 drivers/mtd/devices/docg3.c skip = from % DOC_LAYOUT_PAGE_SIZE; from 899 drivers/mtd/devices/docg3.c calc_block_sector(from - skip, &block0, &block1, &page, &ofs, from 964 drivers/mtd/devices/docg3.c from += DOC_LAYOUT_PAGE_SIZE; from 1005 drivers/mtd/devices/docg3.c static int doc_block_isbad(struct mtd_info *mtd, loff_t from) from 1010 drivers/mtd/devices/docg3.c calc_block_sector(from, &block0, &block1, &page, &ofs, from 1013 drivers/mtd/devices/docg3.c from, block0, block1, page, ofs); from 1035 drivers/mtd/devices/docg3.c static int doc_get_erase_count(struct docg3 *docg3, loff_t from) from 1041 drivers/mtd/devices/docg3.c doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf); from 1042 drivers/mtd/devices/docg3.c if (from % DOC_LAYOUT_PAGE_SIZE) from 1044 drivers/mtd/devices/docg3.c calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable); from 425 
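
Several MTD read paths above split a linear offset the same way: cfi_cmdset_000x computes chipnum = from >> cfi->chipshift and ofs = from - (chipnum << cfi->chipshift), while block2mtd uses index = from >> PAGE_SHIFT and offset = from & (PAGE_SIZE - 1). For power-of-two unit sizes the two forms of the low-bits extraction are equivalent, as this short check illustrates (the 4 KiB unit is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int shift = 12;		/* 4 KiB units, hypothetical */
	uint64_t from = 0x12345;

	uint64_t index  = from >> shift;
	uint64_t offset = from & ((1ull << shift) - 1);
	/* same offset computed the way cfi_cmdset_000x writes it */
	uint64_t ofs = from - (index << shift);

	printf("index=%llu offset=%#llx ofs=%#llx\n",
	       (unsigned long long)index,
	       (unsigned long long)offset,
	       (unsigned long long)ofs);
	return 0;
}
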
drivers/mtd/devices/lart.c static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retlen,u_char *buf) from 428 drivers/mtd/devices/lart.c printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len); from 435 drivers/mtd/devices/lart.c if (from & (BUSWIDTH - 1)) from 437 drivers/mtd/devices/lart.c int gap = BUSWIDTH - (from & (BUSWIDTH - 1)); from 439 drivers/mtd/devices/lart.c while (len && gap--) *buf++ = read8 (from++), len--; from 445 drivers/mtd/devices/lart.c *((__u32 *) buf) = read32 (from); from 448 drivers/mtd/devices/lart.c from += BUSWIDTH; from 454 drivers/mtd/devices/lart.c while (len--) *buf++ = read8 (from++); from 99 drivers/mtd/devices/mchp23k256.c static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len, from 114 drivers/mtd/devices/mchp23k256.c mchp23k256_addr2cmd(flash, from, command); from 53 drivers/mtd/devices/ms02-nv.c static int ms02nv_read(struct mtd_info *mtd, loff_t from, from 58 drivers/mtd/devices/ms02-nv.c memcpy(buf, mp->uaddr + from, len); from 229 drivers/mtd/devices/mtd_dataflash.c static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, from 240 drivers/mtd/devices/mtd_dataflash.c (unsigned int)from, (unsigned int)(from + len)); from 243 drivers/mtd/devices/mtd_dataflash.c addr = (((unsigned)from / priv->page_size) << priv->page_offset) from 244 drivers/mtd/devices/mtd_dataflash.c + ((unsigned)from % priv->page_size); from 281 drivers/mtd/devices/mtd_dataflash.c (unsigned)from, (unsigned)(from + len), from 496 drivers/mtd/devices/mtd_dataflash.c loff_t from, size_t len, size_t *retlen, u_char *buf) from 503 drivers/mtd/devices/mtd_dataflash.c status = otp_read(priv->spi, 64, buf, from, len); from 513 drivers/mtd/devices/mtd_dataflash.c loff_t from, size_t len, size_t *retlen, u_char *buf) from 520 drivers/mtd/devices/mtd_dataflash.c status = otp_read(priv->spi, 0, buf, from, len); from 530 drivers/mtd/devices/mtd_dataflash.c loff_t from, size_t len, size_t *retlen, u_char *buf) from 539 drivers/mtd/devices/mtd_dataflash.c if (from >= 64) { from 549 drivers/mtd/devices/mtd_dataflash.c if ((from + len) > 64) from 550 drivers/mtd/devices/mtd_dataflash.c len = 64 - from; from 559 drivers/mtd/devices/mtd_dataflash.c memcpy(scratch + 4 + from, buf, len); from 65 drivers/mtd/devices/mtdram.c static int ram_point(struct mtd_info *mtd, loff_t from, size_t len, from 68 drivers/mtd/devices/mtdram.c *virt = mtd->priv + from; from 94 drivers/mtd/devices/mtdram.c static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) from 99 drivers/mtd/devices/mtdram.c static int ram_read(struct mtd_info *mtd, loff_t from, size_t len, from 102 drivers/mtd/devices/mtdram.c memcpy(buf, mtd->priv + from, len); from 46 drivers/mtd/devices/phram.c static int phram_point(struct mtd_info *mtd, loff_t from, size_t len, from 49 drivers/mtd/devices/phram.c *virt = mtd->priv + from; from 54 drivers/mtd/devices/phram.c static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) from 59 drivers/mtd/devices/phram.c static int phram_read(struct mtd_info *mtd, loff_t from, size_t len, from 64 drivers/mtd/devices/phram.c memcpy(buf, start + from, len); from 132 drivers/mtd/devices/pmc551.c static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len, from 189 drivers/mtd/devices/pmc551.c static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len, from 197 drivers/mtd/devices/pmc551.c printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len); from 200 
drivers/mtd/devices/pmc551.c soff_hi = from & ~(priv->asize - 1); from 201 drivers/mtd/devices/pmc551.c soff_lo = from & (priv->asize - 1); from 204 drivers/mtd/devices/pmc551.c if (priv->curr_map0 != from) { from 215 drivers/mtd/devices/pmc551.c static int pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len) from 223 drivers/mtd/devices/pmc551.c static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len, from 235 drivers/mtd/devices/pmc551.c (long)from, (long)len, (long)priv->asize); from 238 drivers/mtd/devices/pmc551.c end = from + len - 1; from 239 drivers/mtd/devices/pmc551.c soff_hi = from & ~(priv->asize - 1); from 243 drivers/mtd/devices/pmc551.c pmc551_point(mtd, from, len, retlen, (void **)&ptr, NULL); from 137 drivers/mtd/devices/powernv_flash.c static int powernv_flash_read(struct mtd_info *mtd, loff_t from, size_t len, from 140 drivers/mtd/devices/powernv_flash.c return powernv_flash_async_op(mtd, FLASH_OP_READ, from, from 92 drivers/mtd/devices/slram.c static int slram_point(struct mtd_info *mtd, loff_t from, size_t len, from 97 drivers/mtd/devices/slram.c *virt = priv->start + from; from 102 drivers/mtd/devices/slram.c static int slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) from 107 drivers/mtd/devices/slram.c static int slram_read(struct mtd_info *mtd, loff_t from, size_t len, from 112 drivers/mtd/devices/slram.c memcpy(buf, priv->start + from, len); from 545 drivers/mtd/devices/spear_smi.c static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len, from 563 drivers/mtd/devices/spear_smi.c src = flash->base_addr + from; from 605 drivers/mtd/devices/spear_smi.c const unsigned char *from = src; from 609 drivers/mtd/devices/spear_smi.c writeb(*from, dest); from 610 drivers/mtd/devices/spear_smi.c from++; from 206 drivers/mtd/devices/sst25l.c static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len, from 219 drivers/mtd/devices/sst25l.c command[1] = from >> 16; from 220 drivers/mtd/devices/sst25l.c command[2] = from >> 8; from 221 drivers/mtd/devices/sst25l.c command[3] = from; from 1670 drivers/mtd/devices/st_spi_fsm.c static int stfsm_mtd_read(struct mtd_info *mtd, loff_t from, size_t len, from 1677 drivers/mtd/devices/st_spi_fsm.c __func__, (u32)from, len); from 1684 drivers/mtd/devices/st_spi_fsm.c stfsm_read(fsm, buf, bytes, from); from 1687 drivers/mtd/devices/st_spi_fsm.c from += bytes; from 42 drivers/mtd/hyperbus/hyperbus-core.c unsigned long from, ssize_t len) from 47 drivers/mtd/hyperbus/hyperbus-core.c ctlr->ops->copy_from(hbdev, to, from, len); from 51 drivers/mtd/hyperbus/hyperbus-core.c const void *from, ssize_t len) from 56 drivers/mtd/hyperbus/hyperbus-core.c ctlr->ops->copy_to(hbdev, to, from, len); from 75 drivers/mtd/maps/dc21285.c static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) from 77 drivers/mtd/maps/dc21285.c memcpy(to, (void*)(map->virt + from), len); from 105 drivers/mtd/maps/dc21285.c static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len) from 109 drivers/mtd/maps/dc21285.c d.x[0] = *((uint32_t*)from); from 111 drivers/mtd/maps/dc21285.c from += 4; from 117 drivers/mtd/maps/dc21285.c static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len) from 121 drivers/mtd/maps/dc21285.c d.x[0] = *((uint16_t*)from); from 123 drivers/mtd/maps/dc21285.c from += 2; from 129 drivers/mtd/maps/dc21285.c static void dc21285_copy_to_8(struct map_info *map, unsigned long to, 
const void *from, ssize_t len) from 132 drivers/mtd/maps/dc21285.c d.x[0] = *((uint8_t*)from); from 134 drivers/mtd/maps/dc21285.c from++; from 102 drivers/mtd/maps/ixp4xx.c unsigned long from, ssize_t len) from 105 drivers/mtd/maps/ixp4xx.c void __iomem *src = map->virt + from; from 110 drivers/mtd/maps/ixp4xx.c if (from & 1) { from 82 drivers/mtd/maps/lantiq-flash.c unsigned long from, ssize_t len) from 84 drivers/mtd/maps/lantiq-flash.c unsigned char *f = (unsigned char *)map->virt + from; from 96 drivers/mtd/maps/lantiq-flash.c const void *from, ssize_t len) from 98 drivers/mtd/maps/lantiq-flash.c unsigned char *f = (unsigned char *)from; from 23 drivers/mtd/maps/map_funcs.c static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) from 25 drivers/mtd/maps/map_funcs.c inline_map_copy_from(map, to, from, len); from 28 drivers/mtd/maps/map_funcs.c static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) from 30 drivers/mtd/maps/map_funcs.c inline_map_copy_to(map, to, from, len); from 53 drivers/mtd/maps/pci.c static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len) from 56 drivers/mtd/maps/pci.c memcpy_fromio(to, map->base + map->translate(map, from), len); from 71 drivers/mtd/maps/pci.c static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *from, ssize_t len) from 74 drivers/mtd/maps/pci.c memcpy_toio(map->base + map->translate(map, to), from, len); from 138 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long from, ssize_t len) from 143 drivers/mtd/maps/pcmciamtd.c pr_debug("to = %p from = %lu len = %zd\n", to, from, len); from 145 drivers/mtd/maps/pcmciamtd.c int toread = win_size - (from & (win_size-1)); from 151 drivers/mtd/maps/pcmciamtd.c addr = remap_window(map, from); from 159 drivers/mtd/maps/pcmciamtd.c from += toread; from 187 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const void *from, ssize_t len) from 192 drivers/mtd/maps/pcmciamtd.c pr_debug("to = %lu from = %p len = %zd\n", to, from, len); from 204 drivers/mtd/maps/pcmciamtd.c pr_debug("memcpy from %p to %p len = %d\n", from, addr, towrite); from 205 drivers/mtd/maps/pcmciamtd.c memcpy_toio(addr, from, towrite); from 208 drivers/mtd/maps/pcmciamtd.c from += towrite; from 247 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) from 254 drivers/mtd/maps/pcmciamtd.c pr_debug("to = %p from = %lu len = %zd\n", to, from, len); from 255 drivers/mtd/maps/pcmciamtd.c memcpy_fromio(to, win_base + from, len); from 285 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) from 292 drivers/mtd/maps/pcmciamtd.c pr_debug("to = %lu from = %p len = %zd\n", to, from, len); from 293 drivers/mtd/maps/pcmciamtd.c memcpy_toio(win_base + to, from, len); from 108 drivers/mtd/maps/physmap-gemini.c void *to, unsigned long from, from 112 drivers/mtd/maps/physmap-gemini.c inline_map_copy_from(map, to, from, len); from 118 drivers/mtd/maps/physmap-gemini.c const void *from, ssize_t len) from 121 drivers/mtd/maps/physmap-gemini.c inline_map_copy_to(map, to, from, len); from 25 drivers/mtd/maps/pxa2xx-flash.c static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from, from 28 drivers/mtd/maps/pxa2xx-flash.c unsigned 
long start = (unsigned long)map->cached + from; from 112 drivers/mtd/maps/sbc_gxx.c static void sbc_gxx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) from 116 drivers/mtd/maps/sbc_gxx.c if (len > (WINDOW_LENGTH - (from & WINDOW_MASK))) from 117 drivers/mtd/maps/sbc_gxx.c thislen = WINDOW_LENGTH-(from & WINDOW_MASK); from 120 drivers/mtd/maps/sbc_gxx.c sbc_gxx_page(map, from); from 121 drivers/mtd/maps/sbc_gxx.c memcpy_fromio(to, iomapadr + (from & WINDOW_MASK), thislen); from 124 drivers/mtd/maps/sbc_gxx.c from += thislen; from 137 drivers/mtd/maps/sbc_gxx.c static void sbc_gxx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) from 146 drivers/mtd/maps/sbc_gxx.c memcpy_toio(iomapadr + (to & WINDOW_MASK), from, thislen); from 149 drivers/mtd/maps/sbc_gxx.c from += thislen; from 54 drivers/mtd/maps/uclinux.c static int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len, from 58 drivers/mtd/maps/uclinux.c *virt = map->virt + from; from 60 drivers/mtd/maps/uclinux.c *phys = map->phys + from; from 350 drivers/mtd/maps/vmu-flash.c static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len, from 367 drivers/mtd/maps/vmu-flash.c if (from + len > numblocks * card->blocklen) from 368 drivers/mtd/maps/vmu-flash.c len = numblocks * card->blocklen - from; from 374 drivers/mtd/maps/vmu-flash.c vblock = ofs_to_block(from + index, mtd, partition); from 400 drivers/mtd/maps/vmu-flash.c cx = vmu_flash_read_char(from + index, &retval, mtd); from 54 drivers/mtd/mtdconcat.c concat_read(struct mtd_info *mtd, loff_t from, size_t len, from 65 drivers/mtd/mtdconcat.c if (from >= subdev->size) { from 68 drivers/mtd/mtdconcat.c from -= subdev->size; from 71 drivers/mtd/mtdconcat.c if (from + len > subdev->size) from 73 drivers/mtd/mtdconcat.c size = subdev->size - from; from 78 drivers/mtd/mtdconcat.c err = mtd_read(subdev, from, size, &retsize, buf); from 100 drivers/mtd/mtdconcat.c from = 0; from 219 drivers/mtd/mtdconcat.c concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) from 230 drivers/mtd/mtdconcat.c if (from >= subdev->size) { from 231 drivers/mtd/mtdconcat.c from -= subdev->size; from 236 drivers/mtd/mtdconcat.c if (from + devops.len > subdev->size) from 237 drivers/mtd/mtdconcat.c devops.len = subdev->size - from; from 239 drivers/mtd/mtdconcat.c err = mtd_read_oob(subdev, from, &devops); from 270 drivers/mtd/mtdconcat.c from = 0; from 1091 drivers/mtd/mtdcore.c int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, from 1100 drivers/mtd/mtdcore.c if (from < 0 || from >= mtd->size || len > mtd->size - from) from 1104 drivers/mtd/mtdcore.c return mtd->_point(mtd, from, len, retlen, virt, phys); from 1109 drivers/mtd/mtdcore.c int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len) from 1113 drivers/mtd/mtdcore.c if (from < 0 || from >= mtd->size || len > mtd->size - from) from 1117 drivers/mtd/mtdcore.c return mtd->_unpoint(mtd, from, len); from 1144 drivers/mtd/mtdcore.c int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, from 1153 drivers/mtd/mtdcore.c ret = mtd_read_oob(mtd, from, &ops); from 1235 drivers/mtd/mtdcore.c int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) from 1240 drivers/mtd/mtdcore.c ret_code = mtd_check_oob_ops(mtd, from, ops); from 1251 drivers/mtd/mtdcore.c ret_code = mtd->_read_oob(mtd, from, ops); from 1253 drivers/mtd/mtdcore.c ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen, from 1674 
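
pcmciamtd and sbc_gxx above copy through a small mapping window: each pass is clamped to win_size - (from & (win_size - 1)), the window is re-paged at from, the chunk is copied, and from advances. A self-contained model of that loop over an in-memory buffer; remap_window() and the sizes here are stand-ins, not the drivers' helpers:

#include <stdio.h>
#include <string.h>

#define WINDOW_SIZE 8u				/* hypothetical tiny window */
#define WINDOW_MASK (WINDOW_SIZE - 1)

static unsigned char flash[64];			/* pretend device contents */
static unsigned long mapped_base;		/* which window is "paged in" */

static const unsigned char *remap_window(unsigned long from)
{
	mapped_base = from & ~(unsigned long)WINDOW_MASK;	/* sbc_gxx_page() analogue */
	return flash + mapped_base;
}

static void windowed_copy_from(void *to, unsigned long from, size_t len)
{
	unsigned char *dst = to;

	while (len) {
		size_t thislen = WINDOW_SIZE - (from & WINDOW_MASK);

		if (thislen > len)
			thislen = len;
		memcpy(dst, remap_window(from) + (from & WINDOW_MASK), thislen);
		dst += thislen;
		from += thislen;
		len -= thislen;
	}
}

int main(void)
{
	unsigned char buf[20];
	size_t i;

	for (i = 0; i < sizeof(flash); i++)
		flash[i] = (unsigned char)i;
	windowed_copy_from(buf, 5, sizeof(buf));
	printf("first=%u last=%u\n", buf[0], buf[sizeof(buf) - 1]);	/* 5 and 24 */
	return 0;
}
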
drivers/mtd/mtdcore.c int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, from 1682 drivers/mtd/mtdcore.c return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf); from 1697 drivers/mtd/mtdcore.c int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, from 1705 drivers/mtd/mtdcore.c return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf); from 1731 drivers/mtd/mtdcore.c int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) from 1737 drivers/mtd/mtdcore.c return mtd->_lock_user_prot_reg(mtd, from, len); from 65 drivers/mtd/mtdpart.c static int part_read(struct mtd_info *mtd, loff_t from, size_t len, from 73 drivers/mtd/mtdpart.c res = part->parent->_read(part->parent, from + part->offset, len, from 84 drivers/mtd/mtdpart.c static int part_point(struct mtd_info *mtd, loff_t from, size_t len, from 89 drivers/mtd/mtdpart.c return part->parent->_point(part->parent, from + part->offset, len, from 93 drivers/mtd/mtdpart.c static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) from 97 drivers/mtd/mtdpart.c return part->parent->_unpoint(part->parent, from + part->offset, len); from 100 drivers/mtd/mtdpart.c static int part_read_oob(struct mtd_info *mtd, loff_t from, from 108 drivers/mtd/mtdpart.c res = part->parent->_read_oob(part->parent, from + part->offset, ops); from 118 drivers/mtd/mtdpart.c static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from, from 122 drivers/mtd/mtdpart.c return part->parent->_read_user_prot_reg(part->parent, from, len, from 134 drivers/mtd/mtdpart.c static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, from 138 drivers/mtd/mtdpart.c return part->parent->_read_fact_prot_reg(part->parent, from, len, from 174 drivers/mtd/mtdpart.c static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, from 178 drivers/mtd/mtdpart.c return part->parent->_write_user_prot_reg(part->parent, from, len, from 182 drivers/mtd/mtdpart.c static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, from 186 drivers/mtd/mtdpart.c return part->parent->_lock_user_prot_reg(part->parent, from, len); from 297 drivers/mtd/mtdswap.c static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from, from 300 drivers/mtd/mtdswap.c int ret = mtd_read_oob(d->mtd, from, ops); from 307 drivers/mtd/mtdswap.c ret, from); from 314 drivers/mtd/mtdswap.c ops->oobretlen, ops->ooblen, from); from 1119 drivers/mtd/nand/onenand/onenand_base.c static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from, from 1133 drivers/mtd/nand/onenand/onenand_base.c pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, from 1137 drivers/mtd/nand/onenand/onenand_base.c oobcolumn = from & (mtd->oobsize - 1); from 1140 drivers/mtd/nand/onenand/onenand_base.c if (from + len > mtd->size) { from 1155 drivers/mtd/nand/onenand/onenand_base.c column = from & (writesize - 1); from 1159 drivers/mtd/nand/onenand/onenand_base.c if (!onenand_check_bufferram(mtd, from)) { from 1160 drivers/mtd/nand/onenand/onenand_base.c this->command(mtd, ONENAND_CMD_READ, from, writesize); from 1164 drivers/mtd/nand/onenand/onenand_base.c ret = onenand_recover_lsb(mtd, from, ret); from 1165 drivers/mtd/nand/onenand/onenand_base.c onenand_update_bufferram(mtd, from, !ret); from 1190 drivers/mtd/nand/onenand/onenand_base.c from += thislen; from 1220 drivers/mtd/nand/onenand/onenand_base.c static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, from 1234 
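
concat_read() above (drivers/mtd/mtdconcat.c) walks the subdevices, subtracting each subdevice's size from the offset until it lands inside one, then clamps every chunk to that subdevice's end and restarts the next subdevice at offset 0. A compact model of the walk with made-up sizes:

#include <stdio.h>

int main(void)
{
	const unsigned long sizes[] = { 100, 50, 200 };	/* hypothetical subdevice sizes */
	unsigned long from = 130, len = 100;
	unsigned int i;

	for (i = 0; i < 3 && len; i++) {
		unsigned long size;

		if (from >= sizes[i]) {		/* not our subdevice yet */
			from -= sizes[i];
			continue;
		}
		size = len;
		if (from + size > sizes[i])	/* clamp to end of this subdevice */
			size = sizes[i] - from;
		printf("subdev %u: read %lu bytes at %lu\n", i, size, from);
		len -= size;
		from = 0;			/* next subdevice starts at its offset 0 */
	}
	return 0;
}
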
drivers/mtd/nand/onenand/onenand_base.c pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, from 1238 drivers/mtd/nand/onenand/onenand_base.c oobcolumn = from & (mtd->oobsize - 1); from 1241 drivers/mtd/nand/onenand/onenand_base.c if ((from + len) > mtd->size) { from 1255 drivers/mtd/nand/onenand/onenand_base.c if (!onenand_check_bufferram(mtd, from)) { from 1256 drivers/mtd/nand/onenand/onenand_base.c this->command(mtd, ONENAND_CMD_READ, from, writesize); from 1258 drivers/mtd/nand/onenand/onenand_base.c onenand_update_bufferram(mtd, from, !ret); from 1265 drivers/mtd/nand/onenand/onenand_base.c column = from & (writesize - 1); from 1271 drivers/mtd/nand/onenand/onenand_base.c from += thislen; from 1273 drivers/mtd/nand/onenand/onenand_base.c this->command(mtd, ONENAND_CMD_READ, from, writesize); from 1280 drivers/mtd/nand/onenand/onenand_base.c unlikely(from == (this->chipsize >> 1))) { from 1318 drivers/mtd/nand/onenand/onenand_base.c onenand_update_bufferram(mtd, from, !ret); from 1349 drivers/mtd/nand/onenand/onenand_base.c static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from, from 1360 drivers/mtd/nand/onenand/onenand_base.c from += ops->ooboffs; from 1362 drivers/mtd/nand/onenand/onenand_base.c pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from, from 1373 drivers/mtd/nand/onenand/onenand_base.c column = from & (mtd->oobsize - 1); from 1391 drivers/mtd/nand/onenand/onenand_base.c this->command(mtd, readcmd, from, mtd->oobsize); from 1393 drivers/mtd/nand/onenand/onenand_base.c onenand_update_bufferram(mtd, from, 0); from 1397 drivers/mtd/nand/onenand/onenand_base.c ret = onenand_recover_lsb(mtd, from, ret); from 1420 drivers/mtd/nand/onenand/onenand_base.c from += mtd->writesize; from 1444 drivers/mtd/nand/onenand/onenand_base.c static int onenand_read_oob(struct mtd_info *mtd, loff_t from, from 1463 drivers/mtd/nand/onenand/onenand_base.c onenand_mlc_read_ops_nolock(mtd, from, ops) : from 1464 drivers/mtd/nand/onenand/onenand_base.c onenand_read_ops_nolock(mtd, from, ops); from 1466 drivers/mtd/nand/onenand/onenand_base.c ret = onenand_read_oob_nolock(mtd, from, ops); from 1531 drivers/mtd/nand/onenand/onenand_base.c int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from, from 1540 drivers/mtd/nand/onenand/onenand_base.c pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from, from 1547 drivers/mtd/nand/onenand/onenand_base.c if (unlikely((from + len) > mtd->size)) { from 1556 drivers/mtd/nand/onenand/onenand_base.c column = from & (mtd->oobsize - 1); from 1566 drivers/mtd/nand/onenand/onenand_base.c this->command(mtd, readcmd, from, mtd->oobsize); from 1568 drivers/mtd/nand/onenand/onenand_base.c onenand_update_bufferram(mtd, from, 0); from 1572 drivers/mtd/nand/onenand/onenand_base.c ret = onenand_recover_lsb(mtd, from, ret); from 1587 drivers/mtd/nand/onenand/onenand_base.c from += this->writesize; from 2905 drivers/mtd/nand/onenand/onenand_base.c static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len, from 2922 drivers/mtd/nand/onenand/onenand_base.c onenand_mlc_read_ops_nolock(mtd, from, &ops) : from 2923 drivers/mtd/nand/onenand/onenand_base.c onenand_read_ops_nolock(mtd, from, &ops); from 2986 drivers/mtd/nand/onenand/onenand_base.c static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, from 3017 drivers/mtd/nand/onenand/onenand_base.c ret = onenand_otp_write_oob_nolock(mtd, from, &ops); from 3036 drivers/mtd/nand/onenand/onenand_base.c static int onenand_otp_walk(struct 
mtd_info *mtd, loff_t from, size_t len, from 3054 drivers/mtd/nand/onenand/onenand_base.c from += mtd->writesize * otp_pages; from 3060 drivers/mtd/nand/onenand/onenand_base.c if (mtd->writesize * otp_pages < from + len) from 3079 drivers/mtd/nand/onenand/onenand_base.c otpinfo->start = from; from 3083 drivers/mtd/nand/onenand/onenand_base.c from += mtd->writesize; from 3089 drivers/mtd/nand/onenand/onenand_base.c ret = action(mtd, from, len, &tmp_retlen, buf); from 3131 drivers/mtd/nand/onenand/onenand_base.c static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, from 3134 drivers/mtd/nand/onenand/onenand_base.c return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_FACTORY); from 3163 drivers/mtd/nand/onenand/onenand_base.c static int onenand_read_user_prot_reg(struct mtd_info *mtd, loff_t from, from 3166 drivers/mtd/nand/onenand/onenand_base.c return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_USER); from 3179 drivers/mtd/nand/onenand/onenand_base.c static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from, from 3182 drivers/mtd/nand/onenand/onenand_base.c return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_write, MTD_OTP_USER); from 3193 drivers/mtd/nand/onenand/onenand_base.c static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, from 3211 drivers/mtd/nand/onenand/onenand_base.c from = 0; from 3233 drivers/mtd/nand/onenand/onenand_base.c ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER); from 62 drivers/mtd/nand/onenand/onenand_bbt.c loff_t from; from 81 drivers/mtd/nand/onenand/onenand_bbt.c from = 0; from 95 drivers/mtd/nand/onenand/onenand_bbt.c from + j * this->writesize + bd->offs, &ops); from 113 drivers/mtd/nand/onenand/onenand_bbt.c rgn = flexonenand_region(mtd, from); from 114 drivers/mtd/nand/onenand/onenand_bbt.c from += mtd->eraseregions[rgn].erasesize; from 116 drivers/mtd/nand/onenand/onenand_bbt.c from += (1 << bbm->bbt_erase_shift); from 3153 drivers/mtd/nand/raw/nand_base.c static int nand_do_read_ops(struct nand_chip *chip, loff_t from, from 3169 drivers/mtd/nand/raw/nand_base.c chipnr = (int)(from >> chip->chip_shift); from 3172 drivers/mtd/nand/raw/nand_base.c realpage = (int)(from >> chip->page_shift); from 3175 drivers/mtd/nand/raw/nand_base.c col = (int)(from & (mtd->writesize - 1)); from 3482 drivers/mtd/nand/raw/nand_base.c static int nand_do_read_oob(struct nand_chip *chip, loff_t from, from 3495 drivers/mtd/nand/raw/nand_base.c __func__, (unsigned long long)from, readlen); from 3501 drivers/mtd/nand/raw/nand_base.c chipnr = (int)(from >> chip->chip_shift); from 3505 drivers/mtd/nand/raw/nand_base.c realpage = (int)(from >> chip->page_shift); from 3560 drivers/mtd/nand/raw/nand_base.c static int nand_read_oob(struct mtd_info *mtd, loff_t from, from 3578 drivers/mtd/nand/raw/nand_base.c ret = nand_do_read_oob(chip, from, ops); from 3580 drivers/mtd/nand/raw/nand_base.c ret = nand_do_read_ops(chip, from, ops); from 172 drivers/mtd/nand/raw/nand_bbt.c loff_t from; from 180 drivers/mtd/nand/raw/nand_bbt.c from = ((loff_t)page) << this->page_shift; from 190 drivers/mtd/nand/raw/nand_bbt.c from += marker_len; from 193 drivers/mtd/nand/raw/nand_bbt.c res = mtd_read(mtd, from, len, &retlen, buf); from 197 drivers/mtd/nand/raw/nand_bbt.c from & ~mtd->writesize); from 201 drivers/mtd/nand/raw/nand_bbt.c from & ~mtd->writesize); from 243 drivers/mtd/nand/raw/nand_bbt.c from += len; from 467 drivers/mtd/nand/raw/nand_bbt.c loff_t from; from 
474 drivers/mtd/nand/raw/nand_bbt.c from = 0; from 484 drivers/mtd/nand/raw/nand_bbt.c from = (loff_t)startblock << this->bbt_erase_shift; from 492 drivers/mtd/nand/raw/nand_bbt.c ret = scan_block_fast(this, bd, from, buf); from 499 drivers/mtd/nand/raw/nand_bbt.c i, (unsigned long long)from); from 503 drivers/mtd/nand/raw/nand_bbt.c from += (1 << this->bbt_erase_shift); from 475 drivers/mtd/nand/raw/nandsim.c unsigned long from = (i ? decile_max[i - 1] + 1 : 0); from 476 drivers/mtd/nand/raw/nandsim.c if (from > decile_max[i]) from 479 drivers/mtd/nand/raw/nandsim.c from, from 482 drivers/mtd/nand/spi/core.c static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, from 498 drivers/mtd/nand/spi/core.c nanddev_io_for_each_page(nand, from, ops, &iter) { from 148 drivers/mtd/nftlmount.c erasesize based on UnitSizeFactor. So the erasesize we read from the mtd from 228 drivers/mtd/parsers/sharpslpart.c loff_t from, from 240 drivers/mtd/parsers/sharpslpart.c log_num = mtd_div_by_eb((u32)from, mtd); from 241 drivers/mtd/parsers/sharpslpart.c final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd); from 248 drivers/mtd/parsers/sharpslpart.c block_ofs = mtd_mod_by_eb((u32)from, mtd); from 290 drivers/mtd/parsers/sharpslpart.c loff_t from, from 297 drivers/mtd/parsers/sharpslpart.c ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl); from 372 drivers/mtd/spi-nor/aspeed-smc.c static ssize_t aspeed_smc_read_user(struct spi_nor *nor, loff_t from, from 380 drivers/mtd/spi-nor/aspeed-smc.c aspeed_smc_send_cmd_addr(nor, nor->read_opcode, from); from 964 drivers/mtd/spi-nor/cadence-quadspi.c loff_t from, size_t len) from 969 drivers/mtd/spi-nor/cadence-quadspi.c dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from; from 976 drivers/mtd/spi-nor/cadence-quadspi.c memcpy_fromio(buf, cqspi->ahb_base + from, len); from 1020 drivers/mtd/spi-nor/cadence-quadspi.c static ssize_t cqspi_read(struct spi_nor *nor, loff_t from, from 1035 drivers/mtd/spi-nor/cadence-quadspi.c ret = cqspi_direct_read_execute(nor, buf, from, len); from 1037 drivers/mtd/spi-nor/cadence-quadspi.c ret = cqspi_indirect_read_execute(nor, buf, from, len); from 268 drivers/mtd/spi-nor/hisi-sfc.c static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len, from 280 drivers/mtd/spi-nor/hisi-sfc.c from + offset, host->dma_buffer, trans, FMC_OP_READ); from 606 drivers/mtd/spi-nor/intel-spi.c static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, from 635 drivers/mtd/spi-nor/intel-spi.c block_size = min_t(loff_t, from + block_size, from 636 drivers/mtd/spi-nor/intel-spi.c round_up(from + 1, SZ_4K)) - from; from 638 drivers/mtd/spi-nor/intel-spi.c writel(from, ispi->base + FADDR); from 659 drivers/mtd/spi-nor/intel-spi.c dev_err(ispi->dev, "read error: %llx: %#x\n", from, from 669 drivers/mtd/spi-nor/intel-spi.c from += block_size; from 263 drivers/mtd/spi-nor/mtk-quadspi.c static ssize_t mtk_nor_read(struct spi_nor *nor, loff_t from, size_t length, from 267 drivers/mtd/spi-nor/mtk-quadspi.c int addr = (int)from; from 171 drivers/mtd/spi-nor/nxp-spifi.c static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len, from 181 drivers/mtd/spi-nor/nxp-spifi.c memcpy_fromio(buf, spifi->flash_base + from, len); from 305 drivers/mtd/spi-nor/spi-nor.c static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from, from 310 drivers/mtd/spi-nor/spi-nor.c SPI_MEM_OP_ADDR(nor->addr_width, from, 1), from 335 drivers/mtd/spi-nor/spi-nor.c static ssize_t spi_nor_read_data(struct 
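
intel_spi_read() above trims every transfer so it never crosses a 4 KiB boundary: block_size = min_t(loff_t, from + block_size, round_up(from + 1, SZ_4K)) - from. A short check of that clamping, with a hypothetical 64-byte controller limit standing in for the driver's FIFO size:

#include <stdio.h>

#define SZ_4K 4096ull

static unsigned long long round_up_4k(unsigned long long x)
{
	return (x + SZ_4K - 1) & ~(SZ_4K - 1);
}

int main(void)
{
	unsigned long long from = 4000, len = 200;

	while (len) {
		unsigned long long block = len > 64 ? 64 : len;	/* controller limit, hypothetical */
		unsigned long long end = from + block;
		unsigned long long boundary = round_up_4k(from + 1);

		if (end > boundary)		/* never cross a 4 KiB boundary */
			end = boundary;
		block = end - from;
		printf("read %llu bytes at %llu\n", block, from);
		from += block;
		len -= block;
	}
	return 0;
}
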
spi_nor *nor, loff_t from, size_t len, from 339 drivers/mtd/spi-nor/spi-nor.c return spi_nor_spimem_read_data(nor, from, len, buf); from 341 drivers/mtd/spi-nor/spi-nor.c return nor->read(nor, from, len, buf); from 2544 drivers/mtd/spi-nor/spi-nor.c static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, from 2550 drivers/mtd/spi-nor/spi-nor.c dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len); from 2557 drivers/mtd/spi-nor/spi-nor.c loff_t addr = from; from 2573 drivers/mtd/spi-nor/spi-nor.c from += ret; from 1302 drivers/mtd/ubi/eba.c int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, from 1315 drivers/mtd/ubi/eba.c dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); from 1366 drivers/mtd/ubi/eba.c if (vol->eba_tbl->entries[lnum].pnum != from) { from 1368 drivers/mtd/ubi/eba.c vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum); from 1381 drivers/mtd/ubi/eba.c err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size); from 1384 drivers/mtd/ubi/eba.c err, from); from 1453 drivers/mtd/ubi/eba.c ubi_assert(vol->eba_tbl->entries[lnum].pnum == from); from 153 drivers/mtd/ubi/gluebi.c static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, from 160 drivers/mtd/ubi/gluebi.c lnum = div_u64_rem(from, mtd->erasesize, &offs); from 897 drivers/mtd/ubi/ubi.h int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, from 40 drivers/net/dsa/sja1105/sja1105_main.c int from, int to, bool allow) from 43 drivers/net/dsa/sja1105/sja1105_main.c l2_fwd[from].bc_domain |= BIT(to); from 44 drivers/net/dsa/sja1105/sja1105_main.c l2_fwd[from].reach_port |= BIT(to); from 45 drivers/net/dsa/sja1105/sja1105_main.c l2_fwd[from].fl_domain |= BIT(to); from 47 drivers/net/dsa/sja1105/sja1105_main.c l2_fwd[from].bc_domain &= ~BIT(to); from 48 drivers/net/dsa/sja1105/sja1105_main.c l2_fwd[from].reach_port &= ~BIT(to); from 49 drivers/net/dsa/sja1105/sja1105_main.c l2_fwd[from].fl_domain &= ~BIT(to); from 144 drivers/net/ethernet/8390/mac8390.c int from, int count); from 146 drivers/net/ethernet/8390/mac8390.c const void *from, int count); from 661 drivers/net/ethernet/8390/mac8390.c static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, from 666 drivers/net/ethernet/8390/mac8390.c from <<= 1; /* word, skip overhead */ from 667 drivers/net/ethernet/8390/mac8390.c ptr = (unsigned char *)(dev->mem_start+from); from 669 drivers/net/ethernet/8390/mac8390.c if (from & 2) { from 686 drivers/net/ethernet/8390/mac8390.c const void *from, int count) from 689 drivers/net/ethernet/8390/mac8390.c const unsigned char *src = from; from 835 drivers/net/ethernet/8390/mac8390.c const unsigned short *from = fp; from 841 drivers/net/ethernet/8390/mac8390.c *to++ = *from++; from 847 drivers/net/ethernet/8390/mac8390.c const volatile unsigned short *from = (const void *)fp; from 853 drivers/net/ethernet/8390/mac8390.c *to++ = *from++; from 328 drivers/net/ethernet/amd/declance.c static void cp_to_buf(const int type, void *to, const void *from, int len) from 337 drivers/net/ethernet/amd/declance.c memcpy(to, from, len); from 341 drivers/net/ethernet/amd/declance.c fp = from; from 360 drivers/net/ethernet/amd/declance.c fp = from; from 387 drivers/net/ethernet/amd/declance.c static void cp_from_buf(const int type, void *to, const void *from, int len) from 396 drivers/net/ethernet/amd/declance.c memcpy(to, from, len); from 400 drivers/net/ethernet/amd/declance.c fp = from; from 421 drivers/net/ethernet/amd/declance.c fp = from; from 87 
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) from 89 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c struct bnx2x_fastpath *from_fp = &bp->fp[from]; from 91 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; from 93 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; from 123 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * from 125 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c if (from == FCOE_IDX(bp)) { from 1004 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ_e *from = &fl->entries[idx]; from 1008 drivers/net/ethernet/chelsio/cxgb/sge.c to->addr_lo = from->addr_lo; from 1009 drivers/net/ethernet/chelsio/cxgb/sge.c to->addr_hi = from->addr_hi; from 1010 drivers/net/ethernet/chelsio/cxgb/sge.c to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit); from 581 drivers/net/ethernet/chelsio/cxgb3/sge.c struct rx_desc *from = &q->desc[idx]; from 585 drivers/net/ethernet/chelsio/cxgb3/sge.c to->addr_lo = from->addr_lo; /* already big endian */ from 586 drivers/net/ethernet/chelsio/cxgb3/sge.c to->addr_hi = from->addr_hi; /* likewise */ from 1386 drivers/net/ethernet/chelsio/cxgb3/sge.c struct work_request_hdr *from = (struct work_request_hdr *)skb->data; from 1390 drivers/net/ethernet/chelsio/cxgb3/sge.c memcpy(&to[1], &from[1], len - sizeof(*from)); from 1392 drivers/net/ethernet/chelsio/cxgb3/sge.c skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); from 1394 drivers/net/ethernet/chelsio/cxgb3/sge.c to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP | from 1397 drivers/net/ethernet/chelsio/cxgb3/sge.c to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) | from 1635 drivers/net/ethernet/chelsio/cxgb3/sge.c struct work_request_hdr *from; from 1647 drivers/net/ethernet/chelsio/cxgb3/sge.c from = (struct work_request_hdr *)skb->data; from 1648 drivers/net/ethernet/chelsio/cxgb3/sge.c memcpy(&d->flit[1], &from[1], from 1649 drivers/net/ethernet/chelsio/cxgb3/sge.c skb_transport_offset(skb) - sizeof(*from)); from 1662 drivers/net/ethernet/chelsio/cxgb3/sge.c gen, from->wr_hi, from->wr_lo); from 3261 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c unsigned int from, unsigned int to) from 3265 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf, from 3267 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf); from 117 drivers/net/ethernet/ibm/ehea/ehea.h #define EHEA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1)) from 1724 drivers/net/ethernet/intel/fm10k/fm10k_pf.c static void fm10k_record_global_table_data(struct fm10k_global_table_data *from, from 1728 drivers/net/ethernet/intel/fm10k/fm10k_pf.c to->used = le32_to_cpu(from->used); from 1729 drivers/net/ethernet/intel/fm10k/fm10k_pf.c to->avail = le32_to_cpu(from->avail); from 735 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c u8 *from, *dest; from 739 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c from = src + ce_info->offset; from 745 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c src_byte = *from; from 775 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c u8 *from, *dest; from 780 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c from = src + ce_info->offset; from 789 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c src_word = *(u16 *)from; from 819 
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c u8 *from, *dest; from 824 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c from = src + ce_info->offset; from 841 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c src_dword = *(u32 *)from; from 871 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c u8 *from, *dest; from 876 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c from = src + ce_info->offset; from 893 drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c src_qword = *(u64 *)from; from 1966 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_head *from) from 1971 drivers/net/ethernet/intel/i40e/i40e_main.c hlist_for_each_entry_safe(f, h, from, hlist) { from 1989 drivers/net/ethernet/intel/i40e/i40e_main.c struct hlist_head *from) from 1994 drivers/net/ethernet/intel/i40e/i40e_main.c hlist_for_each_entry_safe(new, h, from, hlist) { from 2940 drivers/net/ethernet/intel/ice/ice_common.c u8 *from, *dest; from 2944 drivers/net/ethernet/intel/ice/ice_common.c from = src_ctx + ce_info->offset; from 2950 drivers/net/ethernet/intel/ice/ice_common.c src_byte = *from; from 2980 drivers/net/ethernet/intel/ice/ice_common.c u8 *from, *dest; from 2984 drivers/net/ethernet/intel/ice/ice_common.c from = src_ctx + ce_info->offset; from 2993 drivers/net/ethernet/intel/ice/ice_common.c src_word = *(u16 *)from; from 3023 drivers/net/ethernet/intel/ice/ice_common.c u8 *from, *dest; from 3027 drivers/net/ethernet/intel/ice/ice_common.c from = src_ctx + ce_info->offset; from 3044 drivers/net/ethernet/intel/ice/ice_common.c src_dword = *(u32 *)from; from 3074 drivers/net/ethernet/intel/ice/ice_common.c u8 *from, *dest; from 3078 drivers/net/ethernet/intel/ice/ice_common.c from = src_ctx + ce_info->offset; from 3095 drivers/net/ethernet/intel/ice/ice_common.c src_qword = *(u64 *)from; from 1116 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size, from 1123 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (!to || !from) from 1127 drivers/net/ethernet/mellanox/mlx5/core/cmd.c memcpy(to->first.data, from, copy); from 1129 drivers/net/ethernet/mellanox/mlx5/core/cmd.c from += copy; from 1140 drivers/net/ethernet/mellanox/mlx5/core/cmd.c memcpy(block->data, from, copy); from 1141 drivers/net/ethernet/mellanox/mlx5/core/cmd.c from += copy; from 1150 drivers/net/ethernet/mellanox/mlx5/core/cmd.c static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) from 1156 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (!to || !from) from 1159 drivers/net/ethernet/mellanox/mlx5/core/cmd.c copy = min_t(int, size, sizeof(from->first.data)); from 1160 drivers/net/ethernet/mellanox/mlx5/core/cmd.c memcpy(to, from->first.data, copy); from 1164 drivers/net/ethernet/mellanox/mlx5/core/cmd.c next = from->next; from 425 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c const void *from = page_address(dma_info->page) + offset_from; from 431 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c skb_copy_to_linear_data(skb, from, len); from 174 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c struct mlxsw_sp_mr_tcam_erif_list *from) from 176 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c list_splice(&from->erif_sublists, &to->erif_sublists); from 177 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c to->kvdl_index = from->kvdl_index; from 903 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, from 908 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp; 
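The mlx5_copy_to_msg() hits just above (drivers/net/ethernet/mellanox/mlx5/core/cmd.c) all follow one pattern: the flat source buffer "from" is drained first into a small inline area and then into a chain of fixed-size blocks, with "from" advanced by the amount copied at each step. Below is a minimal, self-contained C sketch of that pattern; the type and function names (cmd_block, cmd_msg, msg_copy_in) and the buffer sizes are hypothetical stand-ins, not the mlx5 definitions.

/*
 * Illustrative sketch only of the chained-copy pattern seen in
 * mlx5_copy_to_msg() above.  Names and sizes are hypothetical.
 */
#include <stddef.h>
#include <string.h>

#define INLINE_DATA_SIZE 16
#define BLOCK_DATA_SIZE  64

struct cmd_block {
	unsigned char data[BLOCK_DATA_SIZE];
	struct cmd_block *next;
};

struct cmd_msg {
	unsigned char first_data[INLINE_DATA_SIZE];
	struct cmd_block *blocks;
};

/*
 * Copy 'size' bytes from a flat buffer into the inline area and then
 * into the chained blocks; returns 0 on success, -1 if an argument is
 * NULL or the message has too few blocks to hold the data.
 */
static int msg_copy_in(struct cmd_msg *to, const void *from, size_t size)
{
	const unsigned char *src = from;
	struct cmd_block *blk;
	size_t copy;

	if (!to || !from)
		return -1;

	copy = size < INLINE_DATA_SIZE ? size : INLINE_DATA_SIZE;
	memcpy(to->first_data, src, copy);
	src += copy;
	size -= copy;

	for (blk = to->blocks; size && blk; blk = blk->next) {
		copy = size < BLOCK_DATA_SIZE ? size : BLOCK_DATA_SIZE;
		memcpy(blk->data, src, copy);
		src += copy;
		size -= copy;
	}

	return size ? -1 : 0;
}

The read side in the listing, mlx5_copy_from_msg(), appears to walk the same chain in the other direction (copying from first.data and then following from->next).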
from 928 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", from 931 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind); from 943 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id, from 948 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id); from 950 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c netdev_err(from->dev, "no span entry found\n"); from 954 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n", from 956 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind); from 63 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, from 67 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id, from 3558 drivers/net/ethernet/micrel/ksz884x.c int from; from 3574 drivers/net/ethernet/micrel/ksz884x.c bits = len = from = to = 0; from 3578 drivers/net/ethernet/micrel/ksz884x.c data[to++] = pattern[from]; from 3580 drivers/net/ethernet/micrel/ksz884x.c ++from; from 3590 drivers/net/ethernet/micrel/ksz884x.c from += 8; from 3592 drivers/net/ethernet/micrel/ksz884x.c } while (from < (int) frame_size); from 3595 drivers/net/ethernet/micrel/ksz884x.c val <<= (from % 8); from 354 drivers/net/ethernet/myricom/myri10ge/myri10ge.c #define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8) from 2060 drivers/net/ethernet/neterion/vxge/vxge-config.c struct __vxge_hw_ring *ring, u32 from, from 2067 drivers/net/ethernet/neterion/vxge/vxge-config.c from_item = mempoolh->items_arr[from]; from 427 drivers/net/ethernet/sfc/bitfield.h #define EFX_AND_OWORD(oword, from, mask) \ from 429 drivers/net/ethernet/sfc/bitfield.h (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \ from 430 drivers/net/ethernet/sfc/bitfield.h (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \ from 433 drivers/net/ethernet/sfc/bitfield.h #define EFX_AND_QWORD(qword, from, mask) \ from 434 drivers/net/ethernet/sfc/bitfield.h (qword).u64[0] = (from).u64[0] & (mask).u64[0] from 436 drivers/net/ethernet/sfc/bitfield.h #define EFX_OR_OWORD(oword, from, mask) \ from 438 drivers/net/ethernet/sfc/bitfield.h (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \ from 439 drivers/net/ethernet/sfc/bitfield.h (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \ from 427 drivers/net/ethernet/sfc/falcon/bitfield.h #define EF4_AND_OWORD(oword, from, mask) \ from 429 drivers/net/ethernet/sfc/falcon/bitfield.h (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \ from 430 drivers/net/ethernet/sfc/falcon/bitfield.h (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \ from 433 drivers/net/ethernet/sfc/falcon/bitfield.h #define EF4_OR_OWORD(oword, from, mask) \ from 435 drivers/net/ethernet/sfc/falcon/bitfield.h (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \ from 436 drivers/net/ethernet/sfc/falcon/bitfield.h (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \ from 744 drivers/net/ethernet/socionext/netsec.c static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num) from 747 drivers/net/ethernet/socionext/netsec.c u16 idx = from; from 1024 drivers/net/ethernet/toshiba/spider_net.c int from = (chain->num_desc + off - cnt) % 
chain->num_desc; from 1027 drivers/net/ethernet/toshiba/spider_net.c "with stat=0x%08x\n", cnt, from, to, cstat); from 117 drivers/net/fddi/defza.c static inline void fza_reads(const void __iomem *from, void *to, from 121 drivers/net/fddi/defza.c const u64 __iomem *src = from; from 134 drivers/net/fddi/defza.c const u32 __iomem *src = from; from 142 drivers/net/fddi/defza.c static inline void fza_writes(const void *from, void __iomem *to, from 146 drivers/net/fddi/defza.c const u64 *src = from; from 159 drivers/net/fddi/defza.c const u32 *src = from; from 167 drivers/net/fddi/defza.c static inline void fza_moves(const void __iomem *from, void __iomem *to, from 171 drivers/net/fddi/defza.c const u64 __iomem *src = from; from 184 drivers/net/fddi/defza.c const u32 __iomem *src = from; from 555 drivers/net/fddi/skfp/pmf.c char *from ; from 888 drivers/net/fddi/skfp/pmf.c from = mib_addr + pt->p_offset ; from 911 drivers/net/fddi/skfp/pmf.c to[2] = *from++ ; from 912 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 915 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 916 drivers/net/fddi/skfp/pmf.c to[2] = *from++ ; from 919 drivers/net/fddi/skfp/pmf.c to[2] = *from++ ; from 920 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 929 drivers/net/fddi/skfp/pmf.c to[1] = *from++ ; from 930 drivers/net/fddi/skfp/pmf.c to[0] = *from++ ; from 932 drivers/net/fddi/skfp/pmf.c to[0] = *from++ ; from 933 drivers/net/fddi/skfp/pmf.c to[1] = *from++ ; from 946 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 955 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 956 drivers/net/fddi/skfp/pmf.c to[2] = *from++ ; from 957 drivers/net/fddi/skfp/pmf.c to[1] = *from++ ; from 958 drivers/net/fddi/skfp/pmf.c to[0] = *from++ ; from 960 drivers/net/fddi/skfp/pmf.c to[0] = *from++ ; from 961 drivers/net/fddi/skfp/pmf.c to[1] = *from++ ; from 962 drivers/net/fddi/skfp/pmf.c to[2] = *from++ ; from 963 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 973 drivers/net/fddi/skfp/pmf.c to[2] = *from++ ; from 974 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 981 drivers/net/fddi/skfp/pmf.c to[0] = *from++ ; from 982 drivers/net/fddi/skfp/pmf.c to[1] = *from++ ; from 983 drivers/net/fddi/skfp/pmf.c to[2] = *from++ ; from 984 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 993 drivers/net/fddi/skfp/pmf.c memcpy((char *) to+2,(char *) from,6) ; from 995 drivers/net/fddi/skfp/pmf.c from += 8 ; from 1001 drivers/net/fddi/skfp/pmf.c memcpy((char *) to,(char *) from,8) ; from 1003 drivers/net/fddi/skfp/pmf.c from += 8 ; from 1009 drivers/net/fddi/skfp/pmf.c memcpy((char *) to,(char *) from,32) ; from 1011 drivers/net/fddi/skfp/pmf.c from += 32 ; from 1017 drivers/net/fddi/skfp/pmf.c to[0] = *from++ ; from 1018 drivers/net/fddi/skfp/pmf.c to[1] = *from++ ; from 1019 drivers/net/fddi/skfp/pmf.c to[2] = *from++ ; from 1020 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 1021 drivers/net/fddi/skfp/pmf.c to[4] = *from++ ; from 1022 drivers/net/fddi/skfp/pmf.c to[5] = *from++ ; from 1023 drivers/net/fddi/skfp/pmf.c to[6] = *from++ ; from 1024 drivers/net/fddi/skfp/pmf.c to[7] = *from++ ; from 1078 drivers/net/fddi/skfp/pmf.c char *from ; from 1098 drivers/net/fddi/skfp/pmf.c from = (char *) (pa + 1 ) ; from 1112 drivers/net/fddi/skfp/pmf.c from += 4 ; /* skip index */ from 1121 drivers/net/fddi/skfp/pmf.c from += 4 ; /* skip index */ from 1130 drivers/net/fddi/skfp/pmf.c from += 4 ; /* skip index */ from 1186 drivers/net/fddi/skfp/pmf.c if (from[0] | from[1]) from 1190 drivers/net/fddi/skfp/pmf.c to[0] = from[2] ; 
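The long run of drivers/net/fddi/skfp/pmf.c entries above and below copies management-parameter fields into a byte buffer one byte at a time, using either a forward or a reversed byte order so that multi-byte values come out consistently regardless of host endianness. A compact, hypothetical helper showing the same conversion for a single 32-bit field (assuming big-endian wire order, which is what the reversed copies suggest) looks like this:

#include <stdint.h>

/*
 * Hypothetical helper, not the driver's code: assemble a 32-bit
 * big-endian wire field starting at 'from' into host order.
 */
static uint32_t be32_field_to_host(const unsigned char *from)
{
	return ((uint32_t)from[0] << 24) |
	       ((uint32_t)from[1] << 16) |
	       ((uint32_t)from[2] << 8)  |
	       (uint32_t)from[3];
}

Spelling the assembly out with shifts yields a host-order value without any endianness conditionals, which appears to be what the forward/reverse byte-copy branches in pmf.c are handling explicitly.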
from 1191 drivers/net/fddi/skfp/pmf.c to[1] = from[3] ; from 1194 drivers/net/fddi/skfp/pmf.c to[1] = from[2] ; from 1195 drivers/net/fddi/skfp/pmf.c to[0] = from[3] ; from 1198 drivers/net/fddi/skfp/pmf.c to[0] = from[2] ; from 1199 drivers/net/fddi/skfp/pmf.c to[1] = from[3] ; from 1201 drivers/net/fddi/skfp/pmf.c from += 4 ; from 1210 drivers/net/fddi/skfp/pmf.c if (from[0] | from[1] | from[2]) from 1212 drivers/net/fddi/skfp/pmf.c to[0] = from[3] ; from 1214 drivers/net/fddi/skfp/pmf.c from += 4 ; from 1224 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 1225 drivers/net/fddi/skfp/pmf.c to[2] = *from++ ; from 1226 drivers/net/fddi/skfp/pmf.c to[1] = *from++ ; from 1227 drivers/net/fddi/skfp/pmf.c to[0] = *from++ ; from 1229 drivers/net/fddi/skfp/pmf.c to[0] = *from++ ; from 1230 drivers/net/fddi/skfp/pmf.c to[1] = *from++ ; from 1231 drivers/net/fddi/skfp/pmf.c to[2] = *from++ ; from 1232 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 1241 drivers/net/fddi/skfp/pmf.c memcpy(to,from+2,6) ; from 1243 drivers/net/fddi/skfp/pmf.c from += 8 ; from 1250 drivers/net/fddi/skfp/pmf.c memcpy(to,from,4) ; from 1252 drivers/net/fddi/skfp/pmf.c from += 4 ; from 1259 drivers/net/fddi/skfp/pmf.c memcpy(to,from,8) ; from 1261 drivers/net/fddi/skfp/pmf.c from += 8 ; from 1268 drivers/net/fddi/skfp/pmf.c memcpy(to,from,32) ; from 1270 drivers/net/fddi/skfp/pmf.c from += 32 ; from 1275 drivers/net/fddi/skfp/pmf.c to[0] = *from++ ; from 1276 drivers/net/fddi/skfp/pmf.c to[1] = *from++ ; from 1277 drivers/net/fddi/skfp/pmf.c to[2] = *from++ ; from 1278 drivers/net/fddi/skfp/pmf.c to[3] = *from++ ; from 1279 drivers/net/fddi/skfp/pmf.c to[4] = *from++ ; from 1280 drivers/net/fddi/skfp/pmf.c to[5] = *from++ ; from 1281 drivers/net/fddi/skfp/pmf.c to[6] = *from++ ; from 1282 drivers/net/fddi/skfp/pmf.c to[7] = *from++ ; from 623 drivers/net/tap.c struct iov_iter *from, int noblock) from 628 drivers/net/tap.c unsigned long total_len = iov_iter_count(from); from 647 drivers/net/tap.c if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from)) from 649 drivers/net/tap.c iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr)); from 676 drivers/net/tap.c i = *from; from 697 drivers/net/tap.c err = zerocopy_sg_from_iter(skb, from); from 699 drivers/net/tap.c err = skb_copy_datagram_from_iter(skb, 0, from, len); from 758 drivers/net/tap.c static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from) from 763 drivers/net/tap.c return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK); from 1657 drivers/net/tun.c struct iov_iter *from, from 1683 drivers/net/tun.c len, from); from 1745 drivers/net/tun.c void *msg_control, struct iov_iter *from, from 1750 drivers/net/tun.c size_t total_len = iov_iter_count(from); from 1767 drivers/net/tun.c if (!copy_from_iter_full(&pi, sizeof(pi), from)) from 1778 drivers/net/tun.c if (!copy_from_iter_full(&gso, sizeof(gso), from)) from 1787 drivers/net/tun.c iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); from 1800 drivers/net/tun.c struct iov_iter i = *from; from 1820 drivers/net/tun.c skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); from 1838 drivers/net/tun.c skb = tun_napi_alloc_frags(tfile, copylen, from); from 1858 drivers/net/tun.c err = zerocopy_sg_from_iter(skb, from); from 1860 drivers/net/tun.c err = skb_copy_datagram_from_iter(skb, 0, from, len); from 2025 drivers/net/tun.c static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) from 2035 drivers/net/tun.c result = tun_get_user(tun, tfile, NULL, from, from 799 
drivers/net/wireless/ath/wil6210/cfg80211.c static bool wil_is_safe_switch(enum nl80211_iftype from, from 802 drivers/net/wireless/ath/wil6210/cfg80211.c if (from == NL80211_IFTYPE_STATION && from 2362 drivers/net/wireless/ath/wil6210/debugfs.c blob->size = map->to - map->from; from 425 drivers/net/wireless/ath/wil6210/wil6210.h u32 from; /* linker address - from, inclusive */ from 35 drivers/net/wireless/ath/wil6210/wil_crash_dump.c host_max = map->host + (map->to - map->from); from 46 drivers/net/wireless/ath/wil6210/wil_crash_dump.c tmp_max = map->host + (map->to - map->from); from 88 drivers/net/wireless/ath/wil6210/wil_crash_dump.c len = map->to - map->from; from 252 drivers/net/wireless/ath/wil6210/wmi.c ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))) from 253 drivers/net/wireless/ath/wil6210/wmi.c return x + fw_mapping[i].host - fw_mapping[i].from; from 183 drivers/net/wireless/wl3501_cs.c struct iw_mgmt_info_element *from) from 185 drivers/net/wireless/wl3501_cs.c iw_set_mgmt_info_element(from->id, to, from->data, from->len); from 32 drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c static int bits(u32 rw, int from, int to) from 35 drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c rw >>= from; from 314 drivers/nubus/nubus.c struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from) from 316 drivers/nubus/nubus.c if (list_is_last(&from->list, &nubus_func_rsrcs)) from 318 drivers/nubus/nubus.c return list_next_entry(from, list); from 829 drivers/of/address.c struct device_node *of_find_matching_node_by_address(struct device_node *from, from 833 drivers/of/address.c struct device_node *dn = of_find_matching_node(from, matches); from 997 drivers/of/base.c struct device_node *of_find_node_by_name(struct device_node *from, from 1004 drivers/of/base.c for_each_of_allnodes_from(from, np) from 1007 drivers/of/base.c of_node_put(from); from 1025 drivers/of/base.c struct device_node *of_find_node_by_type(struct device_node *from, from 1032 drivers/of/base.c for_each_of_allnodes_from(from, np) from 1035 drivers/of/base.c of_node_put(from); from 1055 drivers/of/base.c struct device_node *of_find_compatible_node(struct device_node *from, from 1062 drivers/of/base.c for_each_of_allnodes_from(from, np) from 1066 drivers/of/base.c of_node_put(from); from 1084 drivers/of/base.c struct device_node *of_find_node_with_property(struct device_node *from, from 1092 drivers/of/base.c for_each_of_allnodes_from(from, np) { from 1101 drivers/of/base.c of_node_put(from); from 1162 drivers/of/base.c struct device_node *of_find_matching_node_and_match(struct device_node *from, from 1174 drivers/of/base.c for_each_of_allnodes_from(from, np) { from 1182 drivers/of/base.c of_node_put(from); from 160 drivers/pci/search.c struct pci_bus *pci_find_next_bus(const struct pci_bus *from) from 167 drivers/pci/search.c n = from ? 
from->node.next : pci_root_buses.next; from 265 drivers/pci/search.c struct pci_dev *from) from 272 drivers/pci/search.c if (from) from 273 drivers/pci/search.c dev_start = &from->dev; from 278 drivers/pci/search.c pci_dev_put(from); from 300 drivers/pci/search.c struct pci_dev *from) from 309 drivers/pci/search.c return pci_get_dev_by_id(&id, from); from 328 drivers/pci/search.c struct pci_dev *from) from 330 drivers/pci/search.c return pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); from 348 drivers/pci/search.c struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) from 359 drivers/pci/search.c return pci_get_dev_by_id(&id, from); from 524 drivers/perf/xgene_pmu.c XGENE_PMU_EVENT_ATTR(mcu-req-from-lastload, 0x21), from 525 drivers/perf/xgene_pmu.c XGENE_PMU_EVENT_ATTR(mcu-req-from-bypass, 0x22), from 329 drivers/pnp/card.c const char *id, struct pnp_dev *from) from 341 drivers/pnp/card.c if (!from) { from 344 drivers/pnp/card.c if (from->card != card) from 346 drivers/pnp/card.c pos = from->card_list.next; from 25 drivers/pnp/isapnp/compat.c struct pnp_card *from) from 34 drivers/pnp/isapnp/compat.c list = from ? from->global_list.next : pnp_cards.next; from 47 drivers/pnp/isapnp/compat.c unsigned short function, struct pnp_dev *from) from 58 drivers/pnp/isapnp/compat.c if (from) from 59 drivers/pnp/isapnp/compat.c list = from->global_list.next; from 73 drivers/pnp/isapnp/compat.c if (from) { from 74 drivers/pnp/isapnp/compat.c list = from->card_list.next; from 75 drivers/pnp/isapnp/compat.c if (from->card != card) /* something is wrong */ from 39 drivers/pwm/core.c unsigned int from = 0; from 46 drivers/pwm/core.c from = pwm; from 48 drivers/pwm/core.c start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, from, from 135 drivers/rapidio/rio-scan.c static u16 rio_destid_next(struct rio_net *net, u16 from) from 141 drivers/rapidio/rio-scan.c destid = find_next_bit(idtab->table, idtab->max, from); from 840 drivers/rapidio/rio.c struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from) from 846 drivers/rapidio/rio.c n = from ? from->global_list.next : rio_devices.next; from 1346 drivers/rapidio/rio.c u8 hopcount, u32 from) from 1350 drivers/rapidio/rio.c if (from == 0) { from 1360 drivers/rapidio/rio.c rio_local_read_config_32(port, from, &reg_val); from 1363 drivers/rapidio/rio.c from, &reg_val); from 1433 drivers/rapidio/rio.c u16 asm_vid, u16 asm_did, struct rio_dev *from) from 1440 drivers/rapidio/rio.c n = from ?
from->global_list.next : rio_devices.next; from 1453 drivers/rapidio/rio.c rio_dev_put(from); from 1474 drivers/rapidio/rio.c struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from) from 1476 drivers/rapidio/rio.c return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from); from 23 drivers/rapidio/rio.h u8 hopcount, u32 from); from 35 drivers/rapidio/rio.h extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from); from 218 drivers/rpmsg/rpmsg_char.c struct iov_iter *from) from 222 drivers/rpmsg/rpmsg_char.c size_t len = iov_iter_count(from); from 230 drivers/rpmsg/rpmsg_char.c if (!copy_from_iter_full(kbuf, len, from)) { from 312 drivers/s390/block/dasd_devmap.c int from, from_id0, from_id1; from 331 drivers/s390/block/dasd_devmap.c if (dasd_busid(from_str, &from_id0, &from_id1, &from)) { from 336 drivers/s390/block/dasd_devmap.c to = from; from 344 drivers/s390/block/dasd_devmap.c if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) { from 358 drivers/s390/block/dasd_devmap.c while (from <= to) { from 359 drivers/s390/block/dasd_devmap.c sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++); from 3596 drivers/s390/block/dasd_eckd.c static int count_exts(unsigned int from, unsigned int to, int trks_per_ext) from 3602 drivers/s390/block/dasd_eckd.c if (from == to) from 3606 drivers/s390/block/dasd_eckd.c if (from % trks_per_ext != 0) { from 3607 drivers/s390/block/dasd_eckd.c tmp = from + trks_per_ext - (from % trks_per_ext) - 1; from 3610 drivers/s390/block/dasd_eckd.c cur_pos = tmp - from + 1; from 3614 drivers/s390/block/dasd_eckd.c if (to - (from + cur_pos) + 1 >= trks_per_ext) { from 3616 drivers/s390/block/dasd_eckd.c count += (tmp - (from + cur_pos) + 1) / trks_per_ext; from 3747 drivers/s390/block/dasd_eckd.c unsigned int from, unsigned int to) from 3767 drivers/s390/block/dasd_eckd.c cur_pos = from; from 5395 drivers/s390/block/dasd_eckd.c dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) from 5401 drivers/s390/block/dasd_eckd.c while (from <= to) { from 5404 drivers/s390/block/dasd_eckd.c from, ((int *) from)[0], ((int *) from)[1]); from 5407 drivers/s390/block/dasd_eckd.c if (from->flags & CCW_FLAG_IDA) from 5408 drivers/s390/block/dasd_eckd.c datap = (char *) *((addr_t *) (addr_t) from->cda); from 5410 drivers/s390/block/dasd_eckd.c datap = (char *) ((addr_t) from->cda); from 5413 drivers/s390/block/dasd_eckd.c for (count = 0; count < from->count && count < 32; count++) { from 5419 drivers/s390/block/dasd_eckd.c from++; from 5453 drivers/s390/block/dasd_eckd.c struct ccw1 *first, *last, *fail, *from, *to; from 5525 drivers/s390/block/dasd_eckd.c from = ++to; from 5528 drivers/s390/block/dasd_eckd.c if (from < fail - 2) { from 5529 drivers/s390/block/dasd_eckd.c from = fail - 2; /* there is a gap - print header */ from 5533 drivers/s390/block/dasd_eckd.c len += dasd_eckd_dump_ccw_range(from, to, page + len); from 5536 drivers/s390/block/dasd_eckd.c from = max(from, ++to); from 5537 drivers/s390/block/dasd_eckd.c if (from < last - 1) { from 5538 drivers/s390/block/dasd_eckd.c from = last - 1; /* there is a gap - print header */ from 5541 drivers/s390/block/dasd_eckd.c len += dasd_eckd_dump_ccw_range(from, last, page + len); from 316 drivers/s390/char/sclp_vt220.c int from; from 329 drivers/s390/char/sclp_vt220.c for (from=0, to=0; from 330 drivers/s390/char/sclp_vt220.c (from < count) && (to < sclp_vt220_space_left(request)); from 331 drivers/s390/char/sclp_vt220.c from++) { from 333 drivers/s390/char/sclp_vt220.c 
c = msg[from]; from 347 drivers/s390/char/sclp_vt220.c return from; from 964 drivers/s390/char/tape_34xx.c tape_34xx_delete_sbid_from(struct tape_device *device, int from) from 977 drivers/s390/char/tape_34xx.c if (sbid->bid.block >= from) { from 50 drivers/s390/cio/blacklist.c unsigned int to_ssid, unsigned int from, from 53 drivers/s390/cio/blacklist.c if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { from 56 drivers/s390/cio/blacklist.c from_ssid, from, to_ssid, to); from 62 drivers/s390/cio/blacklist.c (from <= to))) { from 64 drivers/s390/cio/blacklist.c set_bit(from, bl_dev[from_ssid]); from 66 drivers/s390/cio/blacklist.c clear_bit(from, bl_dev[from_ssid]); from 67 drivers/s390/cio/blacklist.c from++; from 68 drivers/s390/cio/blacklist.c if (from > __MAX_SUBCHANNEL) { from 70 drivers/s390/cio/blacklist.c from = 0; from 152 drivers/s390/cio/blacklist.c unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to; from 172 drivers/s390/cio/blacklist.c from = 0; from 180 drivers/s390/cio/blacklist.c from = ipl_info.data.ccw.dev_id.devno; from 185 drivers/s390/cio/blacklist.c from = ipl_info.data.fcp.dev_id.devno; from 191 drivers/s390/cio/blacklist.c to = from; from 198 drivers/s390/cio/blacklist.c from = to = console_devno; from 201 drivers/s390/cio/blacklist.c &from_ssid, &from, msgtrigger); from 210 drivers/s390/cio/blacklist.c to = from; from 215 drivers/s390/cio/blacklist.c rc = blacklist_range(ra, from_ssid, to_ssid, from, to, from 107 drivers/s390/cio/idset.c void idset_add_set(struct idset *to, struct idset *from) from 109 drivers/s390/cio/idset.c int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id); from 111 drivers/s390/cio/idset.c bitmap_or(to->bitmap, to->bitmap, from->bitmap, len); from 23 drivers/s390/cio/idset.h void idset_add_set(struct idset *to, struct idset *from); from 197 drivers/s390/cio/vfio_ccw_cp.c u64 from; from 213 drivers/s390/cio/vfio_ccw_cp.c from = pa.pa_pfn[i] << PAGE_SHIFT; from 216 drivers/s390/cio/vfio_ccw_cp.c from += iova & (PAGE_SIZE - 1); from 221 drivers/s390/cio/vfio_ccw_cp.c memcpy(to + (n - l), (void *)from, m); from 24 drivers/s390/net/smsgiucv.c void (*callback)(const char *from, char *str); from 92 drivers/s390/net/smsgiucv.c void (*callback)(const char *from, char *str)) from 109 drivers/s390/net/smsgiucv.c void (*callback)(const char *from, from 67 drivers/s390/net/smsgiucv_app.c static struct smsg_app_event *smsg_app_event_alloc(const char *from, from 90 drivers/s390/net/smsgiucv_app.c snprintf(ev->envp[0], ENV_SENDER_LEN, ENV_SENDER_STR "%s", from); from 121 drivers/s390/net/smsgiucv_app.c static void smsg_app_callback(const char *from, char *msg) from 127 drivers/s390/net/smsgiucv_app.c if (sender && strlen(sender) > 0 && strcmp(from, sender) != 0) from 138 drivers/s390/net/smsgiucv_app.c se = smsg_app_event_alloc(from, msg); from 1470 drivers/s390/virtio/virtio_ccw.c unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to; from 1477 drivers/s390/virtio/virtio_ccw.c &from_ssid, &from); from 1484 drivers/s390/virtio/virtio_ccw.c ((from_ssid == to_ssid) && (from > to))) from 1489 drivers/s390/virtio/virtio_ccw.c to = from; from 1494 drivers/s390/virtio/virtio_ccw.c ((from_ssid == to_ssid) && (from <= to))) { from 1495 drivers/s390/virtio/virtio_ccw.c set_bit(from, devs_no_auto[from_ssid]); from 1496 drivers/s390/virtio/virtio_ccw.c from++; from 1497 drivers/s390/virtio/virtio_ccw.c if (from > __MAX_SUBCHANNEL) { from 1499 drivers/s390/virtio/virtio_ccw.c from = 0; from 362 
drivers/scsi/arm/fas216.c static struct { int command; void *from; } cmd_list[8]; from 368 drivers/scsi/arm/fas216.c cmd_list[cmd_ptr].from = __builtin_return_address(0); from 396 drivers/scsi/arm/fas216.c printk("%02x:%p ", cmd_list[i].command, cmd_list[i].from); from 1069 drivers/scsi/esas2r/esas2r.h bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from, from 1071 drivers/scsi/esas2r/esas2r.h bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from, from 1001 drivers/scsi/esas2r/esas2r_flash.c u32 from, from 1016 drivers/scsi/esas2r/esas2r_flash.c iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE); from 1018 drivers/scsi/esas2r/esas2r_flash.c iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE); from 1021 drivers/scsi/esas2r/esas2r_flash.c offset = from & (WINDOW_SIZE - 1); from 1027 drivers/scsi/esas2r/esas2r_flash.c from += len; from 1277 drivers/scsi/esas2r/esas2r_main.c u32 from, from 1287 drivers/scsi/esas2r/esas2r_main.c iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE); from 1291 drivers/scsi/esas2r/esas2r_main.c offset = from & (MW_DATA_WINDOW_SIZE - 1); from 1297 drivers/scsi/esas2r/esas2r_main.c from += len; from 37 drivers/scsi/fnic/fnic_trace.h const void *from, from 4743 drivers/scsi/lpfc/lpfc_bsg.c uint8_t *from; from 4837 drivers/scsi/lpfc/lpfc_bsg.c from = pmbx; from 4838 drivers/scsi/lpfc/lpfc_bsg.c ext = from + sizeof(MAILBOX_t); from 725 drivers/scsi/mvsas/mv_64xx.c int buf_len, int from, void *prd) from 731 drivers/scsi/mvsas/mv_64xx.c buf_prd += from; from 732 drivers/scsi/mvsas/mv_64xx.c for (i = 0; i < MAX_SG_ENTRY - from; i++) { from 987 drivers/scsi/mvsas/mv_94xx.c int buf_len, int from, void *prd) from 995 drivers/scsi/mvsas/mv_94xx.c buf_prd += from; from 1005 drivers/scsi/mvsas/mv_94xx.c for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) { from 154 drivers/scsi/mvsas/mv_sas.h int buf_len, int from, void *prd); from 1808 drivers/scsi/qla2xxx/qla_def.h #define SET_TARGET_ID(ha, to, from) \ from 1811 drivers/scsi/qla2xxx/qla_def.h to.extended = cpu_to_le16(from); \ from 1813 drivers/scsi/qla2xxx/qla_def.h to.id.standard = (uint8_t)from; \ from 286 drivers/scsi/scsi_devinfo.c char *from, int compatible) from 290 drivers/scsi/scsi_devinfo.c from_length = strlen(from); from 292 drivers/scsi/scsi_devinfo.c strncpy(to, from, to_length); from 301 drivers/scsi/scsi_devinfo.c __func__, name, from); from 26 drivers/scsi/snic/snic_trc.h const void *from, from 249 drivers/sh/maple/maple.c int port, unit, from, to, len; from 255 drivers/sh/maple/maple.c from = port << 6; from 264 drivers/sh/maple/maple.c mq->command | (to << 8) | (from << 16) | (len << 24); from 796 drivers/spi/spi-bcm-qspi.c u32 addr = 0, len, rdlen, len_words, from = 0; from 805 drivers/spi/spi-bcm-qspi.c from = op->addr.val; from 814 drivers/spi/spi-bcm-qspi.c addr = from & 0xff000000; from 820 drivers/spi/spi-bcm-qspi.c addr = from; from 822 drivers/spi/spi-bcm-qspi.c addr = from & 0x00ffffff; from 461 drivers/spi/spi-ti-qspi.c loff_t from) from 464 drivers/spi/spi-ti-qspi.c dma_addr_t dma_src = qspi->mmap_phys_base + from; from 534 drivers/spi/spi-ti-qspi.c u32 from = 0; from 543 drivers/spi/spi-ti-qspi.c from = op->addr.val; from 544 drivers/spi/spi-ti-qspi.c if (from + op->data.nbytes > qspi->mmap_size) from 560 drivers/spi/spi-ti-qspi.c ret = ti_qspi_dma_xfer_sg(qspi, sgt, from); from 564 drivers/spi/spi-ti-qspi.c ret = ti_qspi_dma_bounce_buffer(qspi, from, from 569 drivers/spi/spi-ti-qspi.c memcpy_fromio(op->data.buf.in, qspi->mmap_base + from, from 69 
drivers/staging/fbtft/fbtft.h void (*mkdirty)(struct fb_info *info, int from, int to); from 816 drivers/staging/speakup/main.c static int say_from_to(struct vc_data *vc, u_long from, u_long to, from 824 drivers/staging/speakup/main.c spk_attr = get_attributes(vc, (u_short *)from); from 825 drivers/staging/speakup/main.c while (from < to) { from 826 drivers/staging/speakup/main.c buf[i++] = get_char(vc, (u_short *)from, &tmp); from 827 drivers/staging/speakup/main.c from += 2; from 846 drivers/staging/speakup/main.c static void say_line_from_to(struct vc_data *vc, u_long from, u_long to, from 852 drivers/staging/speakup/main.c start += from * 2; from 933 drivers/staging/speakup/main.c static void say_screen_from_to(struct vc_data *vc, u_long from, u_long to) from 937 drivers/staging/speakup/main.c if (from > 0) from 938 drivers/staging/speakup/main.c start += from * vc->vc_size_row; from 942 drivers/staging/speakup/main.c for (from = start; from < end; from = to) { from 943 drivers/staging/speakup/main.c to = from + vc->vc_size_row; from 944 drivers/staging/speakup/main.c say_from_to(vc, from, to, 1); from 955 drivers/staging/speakup/main.c u_long start, end, from, to; from 964 drivers/staging/speakup/main.c from = start + (win_left * 2); from 966 drivers/staging/speakup/main.c say_from_to(vc, from, to, 1); from 669 drivers/target/target_core_user.c void *from, *to = NULL; from 676 drivers/target/target_core_user.c from = kmap_atomic(sg_page(sg)) + sg->offset; from 723 drivers/target/target_core_user.c from + sg->length - sg_remaining, from 731 drivers/target/target_core_user.c kunmap_atomic(from - sg->offset); from 744 drivers/target/target_core_user.c void *from = NULL, *to; from 774 drivers/target/target_core_user.c if (from) from 775 drivers/target/target_core_user.c kunmap_atomic(from); from 780 drivers/target/target_core_user.c from = kmap_atomic(page); from 787 drivers/target/target_core_user.c tcmu_flush_dcache_range(from, copy_bytes); from 788 drivers/target/target_core_user.c memcpy(to + sg->length - sg_remaining, from + offset, from 799 drivers/target/target_core_user.c if (from) from 800 drivers/target/target_core_user.c kunmap_atomic(from); from 62 drivers/target/tcm_fc/tfc_io.c void *from; from 142 drivers/target/tcm_fc/tfc_io.c from = kmap_atomic(page + (mem_off >> PAGE_SHIFT)); from 143 drivers/target/tcm_fc/tfc_io.c page_addr = from; from 144 drivers/target/tcm_fc/tfc_io.c from += offset_in_page(mem_off); from 147 drivers/target/tcm_fc/tfc_io.c memcpy(to, from, tlen); from 211 drivers/target/tcm_fc/tfc_io.c void *from; from 267 drivers/target/tcm_fc/tfc_io.c from = fc_frame_payload_get(fp, 0); from 307 drivers/target/tcm_fc/tfc_io.c memcpy(to, from, tlen); from 310 drivers/target/tcm_fc/tfc_io.c from += tlen; from 172 drivers/tty/n_tty.c void *from = read_buf_addr(ldata, tail); from 176 drivers/tty/n_tty.c tty_audit_add_data(tty, from, size); from 177 drivers/tty/n_tty.c uncopied = copy_to_user(to, from, size); from 178 drivers/tty/n_tty.c zero_buffer(tty, from, size - uncopied); from 183 drivers/tty/n_tty.c from = ldata->read_buf; from 186 drivers/tty/n_tty.c tty_audit_add_data(tty, from, n); from 187 drivers/tty/n_tty.c uncopied = copy_to_user(to, from, n); from 188 drivers/tty/n_tty.c zero_buffer(tty, from, n - uncopied); from 1978 drivers/tty/n_tty.c unsigned char *from = read_buf_addr(ldata, tail); from 1979 drivers/tty/n_tty.c retval = copy_to_user(*b, from, n); from 1981 drivers/tty/n_tty.c is_eof = n == 1 && *from == EOF_CHAR(tty); from 1982 drivers/tty/n_tty.c 
tty_audit_add_data(tty, from, n); from 1983 drivers/tty/n_tty.c zero_buffer(tty, from, n); from 4727 drivers/tty/vt/vt.c int from, wrap, from_off, avail; from 4737 drivers/tty/vt/vt.c from = scr_end; from 4740 drivers/tty/vt/vt.c from = 0; from 4744 drivers/tty/vt/vt.c from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row; from 4745 drivers/tty/vt/vt.c avail = (origin - from + wrap) % wrap; from 4755 drivers/tty/vt/vt.c c->vc_visible_origin = ubase + (from + from_off) % wrap; from 1183 drivers/usb/gadget/function/f_fs.c static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) from 1202 drivers/usb/gadget/function/f_fs.c p->data = *from; from 1216 drivers/usb/gadget/function/f_fs.c *from = p->data; from 632 drivers/usb/gadget/legacy/inode.c ep_write_iter(struct kiocb *iocb, struct iov_iter *from) from 636 drivers/usb/gadget/legacy/inode.c size_t len = iov_iter_count(from); from 668 drivers/usb/gadget/legacy/inode.c if (unlikely(!copy_from_iter_full(buf, len, from))) { from 700 drivers/usb/misc/sisusbvga/sisusb_con.c int from, to, baseline; from 730 drivers/usb/misc/sisusbvga/sisusb_con.c case CUR_BLOCK: from = 1; from 733 drivers/usb/misc/sisusbvga/sisusb_con.c case CUR_TWO_THIRDS: from = c->vc_font.height / 3; from 736 drivers/usb/misc/sisusbvga/sisusb_con.c case CUR_LOWER_HALF: from = c->vc_font.height / 2; from 739 drivers/usb/misc/sisusbvga/sisusb_con.c case CUR_LOWER_THIRD: from = (c->vc_font.height * 2) / 3; from 742 drivers/usb/misc/sisusbvga/sisusb_con.c case CUR_NONE: from = 31; from 746 drivers/usb/misc/sisusbvga/sisusb_con.c case CUR_UNDERLINE: from = baseline - 1; from 751 drivers/usb/misc/sisusbvga/sisusb_con.c if (sisusb->sisusb_cursor_size_from != from || from 754 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb_setidxreg(sisusb, SISCR, 0x0a, from); from 757 drivers/usb/misc/sisusbvga/sisusb_con.c sisusb->sisusb_cursor_size_from = from; from 230 drivers/usb/mon/mon_bin.c unsigned int off, const unsigned char *from, unsigned int length) from 249 drivers/usb/mon/mon_bin.c memcpy(buf, from, step_len); from 251 drivers/usb/mon/mon_bin.c from += step_len; from 496 drivers/usb/storage/freecom.c int from, base; from 521 drivers/usb/storage/freecom.c from = (length - 1) % 16; from 524 drivers/usb/storage/freecom.c for (i = from + 1; i < 16; i++) from 526 drivers/usb/storage/freecom.c if (from < 8) from 530 drivers/usb/storage/freecom.c for (i = 0; i <= from; i++) { from 683 drivers/vhost/net.c struct iov_iter *from) from 693 drivers/vhost/net.c size_t len = iov_iter_count(from); from 718 drivers/vhost/net.c sock_hlen, from); from 740 drivers/vhost/net.c len, from); from 1765 drivers/vhost/net.c struct iov_iter *from) from 1771 drivers/vhost/net.c return vhost_chr_write_iter(dev, from); from 780 drivers/vhost/vhost.c const void *from, unsigned size) from 785 drivers/vhost/vhost.c return __copy_to_user(to, from, size); from 798 drivers/vhost/vhost.c return __copy_to_user(uaddr, from, size); from 806 drivers/vhost/vhost.c ret = copy_to_iter(from, size, &t); from 815 drivers/vhost/vhost.c void __user *from, unsigned size) from 820 drivers/vhost/vhost.c return __copy_from_user(to, from, size); from 828 drivers/vhost/vhost.c (u64)(uintptr_t)from, size, from 835 drivers/vhost/vhost.c ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov, from 840 drivers/vhost/vhost.c "%p size 0x%llx\n", from, from 948 drivers/vhost/vhost.c __typeof__(ptr) from = \ from 952 drivers/vhost/vhost.c if (from != NULL) \ from 953 drivers/vhost/vhost.c ret = 
__get_user(x, from); \ from 1142 drivers/vhost/vhost.c struct iov_iter *from) from 1148 drivers/vhost/vhost.c ret = copy_from_iter(&type, sizeof(type), from); from 1169 drivers/vhost/vhost.c iov_iter_advance(from, offset); from 1170 drivers/vhost/vhost.c ret = copy_from_iter(&msg, sizeof(msg), from); from 2111 drivers/vhost/vhost.c struct iov_iter from; from 2130 drivers/vhost/vhost.c iov_iter_init(&from, READ, vq->indirect, ret, len); from 2153 drivers/vhost/vhost.c if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) { from 228 drivers/vhost/vhost.h struct iov_iter *from); from 159 drivers/video/console/mdacon.c static inline void mda_set_cursor_size(int from, int to) from 161 drivers/video/console/mdacon.c if (mda_cursor_size_from==from && mda_cursor_size_to==to) from 164 drivers/video/console/mdacon.c if (from > to) { from 167 drivers/video/console/mdacon.c write_mda_b(from, 0x0a); /* cursor start */ from 171 drivers/video/console/mdacon.c mda_cursor_size_from = from; from 679 drivers/video/console/vgacon.c static void vgacon_set_cursor_size(int xpos, int from, int to) from 684 drivers/video/console/vgacon.c if ((from == cursor_size_lastfrom) && (to == cursor_size_lastto)) from 686 drivers/video/console/vgacon.c cursor_size_lastfrom = from; from 700 drivers/video/console/vgacon.c curs = (curs & 0xc0) | from; from 166 drivers/video/fbdev/core/fbcmap.c int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to) from 171 drivers/video/fbdev/core/fbcmap.c if (to->start > from->start) from 172 drivers/video/fbdev/core/fbcmap.c fromoff = to->start - from->start; from 174 drivers/video/fbdev/core/fbcmap.c tooff = from->start - to->start; from 175 drivers/video/fbdev/core/fbcmap.c if (fromoff >= from->len || tooff >= to->len) from 178 drivers/video/fbdev/core/fbcmap.c size = min_t(size_t, to->len - tooff, from->len - fromoff); from 183 drivers/video/fbdev/core/fbcmap.c memcpy(to->red+tooff, from->red+fromoff, size); from 184 drivers/video/fbdev/core/fbcmap.c memcpy(to->green+tooff, from->green+fromoff, size); from 185 drivers/video/fbdev/core/fbcmap.c memcpy(to->blue+tooff, from->blue+fromoff, size); from 186 drivers/video/fbdev/core/fbcmap.c if (from->transp && to->transp) from 187 drivers/video/fbdev/core/fbcmap.c memcpy(to->transp+tooff, from->transp+fromoff, size); from 191 drivers/video/fbdev/core/fbcmap.c int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to) from 196 drivers/video/fbdev/core/fbcmap.c if (to->start > from->start) from 197 drivers/video/fbdev/core/fbcmap.c fromoff = to->start - from->start; from 199 drivers/video/fbdev/core/fbcmap.c tooff = from->start - to->start; from 200 drivers/video/fbdev/core/fbcmap.c if (fromoff >= from->len || tooff >= to->len) from 203 drivers/video/fbdev/core/fbcmap.c size = min_t(size_t, to->len - tooff, from->len - fromoff); from 208 drivers/video/fbdev/core/fbcmap.c if (copy_to_user(to->red+tooff, from->red+fromoff, size)) from 210 drivers/video/fbdev/core/fbcmap.c if (copy_to_user(to->green+tooff, from->green+fromoff, size)) from 212 drivers/video/fbdev/core/fbcmap.c if (copy_to_user(to->blue+tooff, from->blue+fromoff, size)) from 214 drivers/video/fbdev/core/fbcmap.c if (from->transp && to->transp) from 215 drivers/video/fbdev/core/fbcmap.c if (copy_to_user(to->transp+tooff, from->transp+fromoff, size)) from 21 drivers/video/fbdev/nvidia/nv_type.h #define SetBitField(value,from,to) SetBF(to, GetBF(value,from)) from 200 drivers/video/fbdev/omap2/omapfb/dss/display.c struct omap_dss_device 
*omap_dss_get_next_device(struct omap_dss_device *from) from 212 drivers/video/fbdev/omap2/omapfb/dss/display.c if (from == NULL) { from 219 drivers/video/fbdev/omap2/omapfb/dss/display.c omap_dss_put_device(from); from 223 drivers/video/fbdev/omap2/omapfb/dss/display.c if (dssdev == from) { from 83 drivers/video/fbdev/riva/fbdev.c #define SetBitField(value,from,to) SetBF(to,GetBF(value,from)) from 3326 drivers/video/fbdev/sis/init.c #define GETBITSTR(val,from,to) ((GETBITS(val,from)) << (0?to)) from 186 drivers/visorbus/visorbus_main.c struct visor_device *from) from 195 drivers/visorbus/visorbus_main.c if (from) from 196 drivers/visorbus/visorbus_main.c dev_start = &from->device; from 18 drivers/visorbus/visorbus_private.h struct visor_device *from); from 52 drivers/zorro/zorro.c struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from) from 59 drivers/zorro/zorro.c for (z = from ? from+1 : &zorro_autocon[0]; from 153 fs/9p/vfs_addr.c struct iov_iter from; from 165 fs/9p/vfs_addr.c iov_iter_bvec(&from, WRITE, &bvec, 1, len); from 172 fs/9p/vfs_addr.c p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err); from 408 fs/9p/vfs_file.c v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 415 fs/9p/vfs_file.c retval = generic_write_checks(iocb, from); from 420 fs/9p/vfs_file.c retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err); from 596 fs/9p/vfs_file.c v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 602 fs/9p/vfs_file.c return v9fs_file_write_iter(iocb, from); from 107 fs/9p/xattr.c struct iov_iter from; from 110 fs/9p/xattr.c iov_iter_kvec(&from, WRITE, &kvec, 1, value_len); from 128 fs/9p/xattr.c p9_client_write(fid, 0, &from, &retval); from 678 fs/affs/file.c unsigned from, to; from 682 fs/affs/file.c from = pos & (PAGE_SIZE - 1); from 683 fs/affs/file.c to = from + len; from 697 fs/affs/file.c tmp = (page->index << PAGE_SHIFT) + from; from 706 fs/affs/file.c tmp = min(bsize - boff, to - from); from 708 fs/affs/file.c memcpy(AFFS_DATA(bh) + boff, data + from, tmp); from 713 fs/affs/file.c from += tmp; from 722 fs/affs/file.c while (from + bsize <= to) { from 727 fs/affs/file.c memcpy(AFFS_DATA(bh), data + from, bsize); from 751 fs/affs/file.c from += bsize; from 754 fs/affs/file.c if (from < to) { from 759 fs/affs/file.c tmp = min(bsize, to - from); from 761 fs/affs/file.c memcpy(AFFS_DATA(bh), data + from, tmp); from 786 fs/affs/file.c from += tmp; from 793 fs/affs/file.c tmp = (page->index << PAGE_SHIFT) + from; from 36 fs/afs/dir.c static int afs_link(struct dentry *from, struct inode *dir, from 1661 fs/afs/dir.c static int afs_link(struct dentry *from, struct inode *dir, from 1667 fs/afs/dir.c struct afs_vnode *vnode = AFS_FS_I(d_inode(from)); from 1174 fs/afs/internal.h enum afs_call_state from, from 1180 fs/afs/internal.h if (call->state == from) { from 1182 fs/afs/internal.h trace_afs_call_state(call, from, to, 0, 0); from 85 fs/afs/write.c unsigned f, from = pos & (PAGE_SIZE - 1); from 86 fs/afs/write.c unsigned t, to = from + len; from 91 fs/afs/write.c vnode->fid.vid, vnode->fid.vnode, index, from, to); from 139 fs/afs/write.c (to < f || from > t)) from 141 fs/afs/write.c if (from < f) from 142 fs/afs/write.c f = from; from 146 fs/afs/write.c f = from; from 731 fs/afs/write.c ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) from 735 fs/afs/write.c size_t count = iov_iter_count(from); from 749 fs/afs/write.c result = generic_file_write_iter(iocb, from); from 33 
fs/bfs/file.c static int bfs_move_block(unsigned long from, unsigned long to, from 38 fs/bfs/file.c bh = sb_bread(sb, from); from 1265 fs/block_dev.c static int add_symlink(struct kobject *from, struct kobject *to) from 1267 fs/block_dev.c return sysfs_create_link(from, to, kobject_name(to)); from 1270 fs/block_dev.c static void del_symlink(struct kobject *from, struct kobject *to) from 1272 fs/block_dev.c sysfs_remove_link(from, kobject_name(to)); from 1968 fs/block_dev.c ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) from 1983 fs/block_dev.c if (!iov_iter_count(from)) from 1992 fs/block_dev.c iov_iter_truncate(from, size - iocb->ki_pos); from 1995 fs/block_dev.c ret = __generic_file_write_iter(iocb, from); from 2833 fs/btrfs/ctree.h int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, from 1827 fs/btrfs/file.c static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) from 1837 fs/btrfs/file.c written = generic_file_direct_write(iocb, from); from 1839 fs/btrfs/file.c if (written < 0 || !iov_iter_count(from)) from 1843 fs/btrfs/file.c written_buffered = btrfs_buffered_write(iocb, from); from 1886 fs/btrfs/file.c struct iov_iter *from) from 1913 fs/btrfs/file.c err = generic_write_checks(iocb, from); from 1920 fs/btrfs/file.c count = iov_iter_count(from); from 1980 fs/btrfs/file.c num_written = __btrfs_direct_write(iocb, from); from 1982 fs/btrfs/file.c num_written = btrfs_buffered_write(iocb, from); from 5040 fs/btrfs/inode.c int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, from 5051 fs/btrfs/inode.c pgoff_t index = from >> PAGE_SHIFT; from 5052 fs/btrfs/inode.c unsigned offset = from & (blocksize - 1); from 5063 fs/btrfs/inode.c block_start = round_down(from, blocksize); from 512 fs/btrfs/send.c static int fs_path_copy(struct fs_path *p, struct fs_path *from) from 516 fs/btrfs/send.c p->reversed = from->reversed; from 519 fs/btrfs/send.c ret = fs_path_add_path(p, from); from 729 fs/btrfs/send.c struct fs_path *from, struct fs_path *to) from 734 fs/btrfs/send.c btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start); from 740 fs/btrfs/send.c TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from); from 522 fs/btrfs/super.c strncmp(args[0].from, "zlib", 4) == 0) { from 537 fs/btrfs/super.c args[0].from + 4); from 542 fs/btrfs/super.c } else if (strncmp(args[0].from, "lzo", 3) == 0) { from 550 fs/btrfs/super.c } else if (strncmp(args[0].from, "zstd", 4) == 0) { from 556 fs/btrfs/super.c args[0].from + 4); from 562 fs/btrfs/super.c } else if (strncmp(args[0].from, "no", 2) == 0) { from 708 fs/btrfs/super.c strcmp(args[0].from, "v1") == 0) { from 713 fs/btrfs/super.c } else if (strcmp(args[0].from, "v2") == 0) { from 807 fs/btrfs/super.c if (strcmp(args[0].from, "panic") == 0) from 810 fs/btrfs/super.c else if (strcmp(args[0].from, "bug") == 0) from 2610 fs/btrfs/tree-log.c u64 from; from 2617 fs/btrfs/tree-log.c from = ALIGN(i_size_read(inode), from 2620 fs/btrfs/tree-log.c from, (u64)-1, 1); from 1849 fs/buffer.c void page_zero_new_buffers(struct page *page, unsigned from, unsigned to) from 1864 fs/buffer.c if (block_end > from && block_start < to) { from 1868 fs/buffer.c start = max(from, block_start); from 1944 fs/buffer.c unsigned from = pos & (PAGE_SIZE - 1); from 1945 fs/buffer.c unsigned to = from + len; from 1954 fs/buffer.c BUG_ON(from > PAGE_SIZE); from 1956 fs/buffer.c BUG_ON(from > to); from 1967 fs/buffer.c if (block_end <= from || block_start >= to) { from 1994 fs/buffer.c if (block_end > to || 
block_start < from) from 1997 fs/buffer.c block_start, from); from 2008 fs/buffer.c (block_start < from || block_end > to)) { from 2022 fs/buffer.c page_zero_new_buffers(page, from, to); from 2034 fs/buffer.c unsigned from, unsigned to) from 2047 fs/buffer.c if (block_end <= from || block_start >= to) { from 2182 fs/buffer.c int block_is_partially_uptodate(struct page *page, unsigned long from, from 2195 fs/buffer.c to = min_t(unsigned, PAGE_SIZE - from, count); from 2196 fs/buffer.c to = from + to; from 2197 fs/buffer.c if (from < blocksize && to > PAGE_SIZE - blocksize) from 2204 fs/buffer.c if (block_end > from && block_start < to) { from 2441 fs/buffer.c int block_commit_write(struct page *page, unsigned from, unsigned to) from 2444 fs/buffer.c __block_commit_write(inode,page,from,to); from 2556 fs/buffer.c unsigned from, to; from 2565 fs/buffer.c from = pos & (PAGE_SIZE - 1); from 2566 fs/buffer.c to = from + len; from 2629 fs/buffer.c zero_user_segments(page, block_start, from, from 2635 fs/buffer.c if (block_start < from || block_end > to) { from 2675 fs/buffer.c page_zero_new_buffers(page, from, to); from 2774 fs/buffer.c loff_t from, get_block_t *get_block) from 2776 fs/buffer.c pgoff_t index = from >> PAGE_SHIFT; from 2777 fs/buffer.c unsigned offset = from & (PAGE_SIZE-1); from 2805 fs/buffer.c return block_truncate_page(mapping, from, get_block); from 2852 fs/buffer.c loff_t from, get_block_t *get_block) from 2854 fs/buffer.c pgoff_t index = from >> PAGE_SHIFT; from 2855 fs/buffer.c unsigned offset = from & (PAGE_SIZE-1); from 1129 fs/ceph/file.c ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, from 1146 fs/ceph/file.c size_t count = iov_iter_count(from); from 1167 fs/ceph/file.c while ((len = iov_iter_count(from)) > 0) { from 1198 fs/ceph/file.c ret = copy_page_from_iter(pages[n], 0, plen, from); from 1411 fs/ceph/file.c static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) from 1453 fs/ceph/file.c err = generic_write_checks(iocb, from); from 1462 fs/ceph/file.c iov_iter_truncate(from, limit - pos); from 1465 fs/ceph/file.c count = iov_iter_count(from); from 1532 fs/ceph/file.c data = *from; from 1543 fs/ceph/file.c iov_iter_advance(from, written); from 1553 fs/ceph/file.c written = generic_perform_write(file, from, pos); from 256 fs/ceph/super.c argstr[0].from); from 264 fs/ceph/super.c fsopt->snapdir_name = kstrndup(argstr[0].from, from 265 fs/ceph/super.c argstr[0].to-argstr[0].from, from 272 fs/ceph/super.c fsopt->mds_namespace = kstrndup(argstr[0].from, from 273 fs/ceph/super.c argstr[0].to-argstr[0].from, from 279 fs/ceph/super.c if (!strncmp(argstr[0].from, "no", from 280 fs/ceph/super.c argstr[0].to - argstr[0].from)) { from 282 fs/ceph/super.c } else if (!strncmp(argstr[0].from, "clean", from 283 fs/ceph/super.c argstr[0].to - argstr[0].from)) { from 292 fs/ceph/super.c fsopt->fscache_uniq = kstrndup(argstr[0].from, from 293 fs/ceph/super.c argstr[0].to-argstr[0].from, from 200 fs/char_dev.c int register_chrdev_region(dev_t from, unsigned count, const char *name) from 203 fs/char_dev.c dev_t to = from + count; from 206 fs/char_dev.c for (n = from; n < to; n = next) { from 218 fs/char_dev.c for (n = from; n < to; n = next) { from 311 fs/char_dev.c void unregister_chrdev_region(dev_t from, unsigned count) from 313 fs/char_dev.c dev_t to = from + count; from 316 fs/char_dev.c for (n = from; n < to; n = next) { from 120 fs/cifs/cifs_unicode.c cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp, from 126 
fs/cifs/cifs_unicode.c src_char = *from; from 145 fs/cifs/cifs_unicode.c len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6); from 179 fs/cifs/cifs_unicode.c cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, from 198 fs/cifs/cifs_unicode.c ftmp[0] = get_unaligned_le16(&from[i]); from 202 fs/cifs/cifs_unicode.c ftmp[1] = get_unaligned_le16(&from[i + 1]); from 206 fs/cifs/cifs_unicode.c ftmp[2] = get_unaligned_le16(&from[i + 2]); from 250 fs/cifs/cifs_unicode.c cifs_strtoUTF16(__le16 *to, const char *from, int len, from 264 fs/cifs/cifs_unicode.c i = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN, from 278 fs/cifs/cifs_unicode.c for (i = 0; len && *from; i++, from += charlen, len -= charlen) { from 279 fs/cifs/cifs_unicode.c charlen = codepage->char2uni(from, len, &wchar_to); from 282 fs/cifs/cifs_unicode.c *from, charlen); from 306 fs/cifs/cifs_unicode.c cifs_utf16_bytes(const __le16 *from, int maxbytes, from 316 fs/cifs/cifs_unicode.c ftmp[0] = get_unaligned_le16(&from[i]); from 320 fs/cifs/cifs_unicode.c ftmp[1] = get_unaligned_le16(&from[i + 1]); from 324 fs/cifs/cifs_unicode.c ftmp[2] = get_unaligned_le16(&from[i + 2]); from 587 fs/cifs/cifs_unicode.c cifs_local_to_utf16_bytes(const char *from, int len, from 594 fs/cifs/cifs_unicode.c for (i = 0; len && *from; i++, from += charlen, len -= charlen) { from 595 fs/cifs/cifs_unicode.c charlen = codepage->char2uni(from, len, &wchar_to); from 93 fs/cifs/cifs_unicode.h int cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen, from 95 fs/cifs/cifs_unicode.h int cifs_utf16_bytes(const __le16 *from, int maxbytes, from 901 fs/cifs/cifsfs.c static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 909 fs/cifs/cifsfs.c written = cifs_user_writev(iocb, from); from 924 fs/cifs/cifsfs.c written = generic_file_write_iter(iocb, from); from 108 fs/cifs/cifsfs.h extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from); from 109 fs/cifs/cifsfs.h extern ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from); from 110 fs/cifs/cifsfs.h extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from); from 2094 fs/cifs/file.c static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) from 2109 fs/cifs/file.c offset += (loff_t)from; from 2111 fs/cifs/file.c write_data += from; from 2113 fs/cifs/file.c if ((to > PAGE_SIZE) || (from > to)) { from 2132 fs/cifs/file.c write_data, to - from, &offset); from 2714 fs/cifs/file.c wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from, from 2723 fs/cifs/file.c copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from); from 2819 fs/cifs/file.c cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, from 2828 fs/cifs/file.c struct iov_iter saved_from = *from; from 2868 fs/cifs/file.c from, &pagevec, cur_len, &start); from 2874 fs/cifs/file.c result, from->type, from 2875 fs/cifs/file.c from->iov_offset, from->count); from 2883 fs/cifs/file.c iov_iter_advance(from, cur_len); from 2923 fs/cifs/file.c wdata, from, &cur_len, &num_pages); from 2969 fs/cifs/file.c *from = saved_from; from 2970 fs/cifs/file.c iov_iter_advance(from, offset - saved_offset); from 3068 fs/cifs/file.c struct kiocb *iocb, struct iov_iter *from, bool direct) from 3076 fs/cifs/file.c struct iov_iter saved_from = *from; from 3077 fs/cifs/file.c size_t len = iov_iter_count(from); from 3085 fs/cifs/file.c if (direct && from->type & ITER_KVEC) { from 3090 fs/cifs/file.c rc = 
generic_write_checks(iocb, from); from 3114 fs/cifs/file.c ctx->iter = *from; from 3117 fs/cifs/file.c rc = setup_aio_ctx_iter(ctx, from, WRITE); from 3171 fs/cifs/file.c ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from) from 3173 fs/cifs/file.c return __cifs_writev(iocb, from, true); from 3176 fs/cifs/file.c ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from) from 3178 fs/cifs/file.c return __cifs_writev(iocb, from, false); from 3182 fs/cifs/file.c cifs_writev(struct kiocb *iocb, struct iov_iter *from) from 3198 fs/cifs/file.c rc = generic_write_checks(iocb, from); from 3202 fs/cifs/file.c if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from), from 3205 fs/cifs/file.c rc = __generic_file_write_iter(iocb, from); from 3218 fs/cifs/file.c cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from) from 3236 fs/cifs/file.c written = generic_file_write_iter(iocb, from); from 3239 fs/cifs/file.c written = cifs_writev(iocb, from); from 3248 fs/cifs/file.c written = cifs_user_writev(iocb, from); from 2172 fs/cifs/inode.c static int cifs_truncate_page(struct address_space *mapping, loff_t from) from 2174 fs/cifs/inode.c pgoff_t index = from >> PAGE_SHIFT; from 2175 fs/cifs/inode.c unsigned offset = from & (PAGE_SIZE - 1); from 445 fs/cifs/smb2misc.c cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb) from 460 fs/cifs/smb2misc.c if (from[0] == '\\') from 461 fs/cifs/smb2misc.c start_of_path = from + 1; from 466 fs/cifs/smb2misc.c (from[0] == '/')) { from 467 fs/cifs/smb2misc.c start_of_path = from + 1; from 469 fs/cifs/smb2misc.c start_of_path = from; from 42 fs/cifs/smb2proto.h extern __le16 *cifs_convert_path_to_utf16(const char *from, from 112 fs/direct-io.c size_t from, to; from 172 fs/direct-io.c &sdio->from); from 187 fs/direct-io.c sdio->from = 0; from 194 fs/direct-io.c ret += sdio->from; from 971 fs/direct-io.c size_t from, to; from 978 fs/direct-io.c from = sdio->head ? 
0 : sdio->from; from 982 fs/direct-io.c while (from < to) { from 1057 fs/direct-io.c zero_user(page, from, 1 << blkbits); from 1059 fs/direct-io.c from += 1 << blkbits; from 1077 fs/direct-io.c u = (to - from) >> blkbits; from 1089 fs/direct-io.c from, from 1100 fs/direct-io.c from += this_chunk_bytes; from 3975 fs/dlm/lock.c int from = ms->m_header.h_nodeid; from 3982 fs/dlm/lock.c if (!is_master_copy(lkb) || lkb->lkb_nodeid != from) from 3991 fs/dlm/lock.c if (!is_process_copy(lkb) || lkb->lkb_nodeid != from) from 3998 fs/dlm/lock.c else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from) from 4009 fs/dlm/lock.c ms->m_type, from, lkb->lkb_id, lkb->lkb_remid, from 283 fs/ecryptfs/main.c sig_src = args[0].from; from 295 fs/ecryptfs/main.c cipher_name_src = args[0].from; from 305 fs/ecryptfs/main.c cipher_key_bytes_src = args[0].from; from 328 fs/ecryptfs/main.c fnek_src = args[0].from; from 350 fs/ecryptfs/main.c fn_cipher_name_src = args[0].from; from 360 fs/ecryptfs/main.c fn_cipher_key_bytes_src = args[0].from; from 468 fs/ecryptfs/mmap.c unsigned from = pos & (PAGE_SIZE - 1); from 469 fs/ecryptfs/mmap.c unsigned to = from + copied; from 246 fs/erofs/super.c args[0].to = args[0].from = NULL; from 564 fs/ext2/dir.c unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1); from 569 fs/ext2/dir.c ext2_dirent * de = (ext2_dirent *) (kaddr + from); from 583 fs/ext2/dir.c from = (char*)pde - (char*)page_address(page); from 584 fs/ext2/dir.c pos = page_offset(page) + from; from 586 fs/ext2/dir.c err = ext2_prepare_chunk(page, pos, to - from); from 589 fs/ext2/dir.c pde->rec_len = ext2_rec_len_to_disk(to - from); from 591 fs/ext2/dir.c err = ext2_commit_chunk(page, pos, to - from); from 49 fs/ext2/file.c static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) from 56 fs/ext2/file.c ret = generic_write_checks(iocb, from); from 66 fs/ext2/file.c ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops); from 172 fs/ext2/file.c static ssize_t ext2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 176 fs/ext2/file.c return ext2_dax_write_iter(iocb, from); from 178 fs/ext2/file.c return generic_file_write_iter(iocb, from); from 126 fs/ext2/inode.c static inline int verify_chain(Indirect *from, Indirect *to) from 128 fs/ext2/inode.c while (from <= to && from->key == *from->p) from 129 fs/ext2/inode.c from++; from 130 fs/ext2/inode.c return (from > to); from 2592 fs/ext4/ext4.h unsigned from, from 2597 fs/ext4/extents.c ext4_lblk_t from, ext4_lblk_t to) from 2606 fs/ext4/extents.c if (from < le32_to_cpu(ex->ee_block) || from 2610 fs/ext4/extents.c from, to, le32_to_cpu(ex->ee_block), ee_len); from 2627 fs/ext4/extents.c trace_ext4_remove_blocks(inode, ex, from, to, partial); from 2650 fs/ext4/extents.c num = le32_to_cpu(ex->ee_block) + ee_len - from; from 2663 fs/ext4/extents.c (EXT4_LBLK_CMASK(sbi, to) >= from) && from 2701 fs/ext4/extents.c if (EXT4_LBLK_COFF(sbi, from) && num == ee_len) { from 2704 fs/ext4/extents.c partial->lblk = from; from 124 fs/ext4/file.c ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos) from 132 fs/ext4/file.c if ((pos | iov_iter_alignment(from)) & blockmask) from 161 fs/ext4/file.c static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from) from 166 fs/ext4/file.c ret = generic_write_checks(iocb, from); from 182 fs/ext4/file.c iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos); from 184 fs/ext4/file.c return iov_iter_count(from); from 189 fs/ext4/file.c ext4_dax_write_iter(struct 
kiocb *iocb, struct iov_iter *from) from 200 fs/ext4/file.c ret = ext4_write_checks(iocb, from); from 210 fs/ext4/file.c ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops); from 220 fs/ext4/file.c ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 233 fs/ext4/file.c return ext4_dax_write_iter(iocb, from); from 242 fs/ext4/file.c ret = ext4_write_checks(iocb, from); from 253 fs/ext4/file.c ext4_unaligned_aio(inode, from, iocb->ki_pos)) { from 261 fs/ext4/file.c if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) { from 270 fs/ext4/file.c ret = __generic_file_write_iter(iocb, from); from 533 fs/ext4/inline.c unsigned from, to; from 577 fs/ext4/inline.c from = 0; from 590 fs/ext4/inline.c ret = __block_write_begin(page, from, to, from 593 fs/ext4/inline.c ret = __block_write_begin(page, from, to, ext4_get_block); from 597 fs/ext4/inline.c from, to, NULL, from 625 fs/ext4/inline.c block_commit_write(page, from, to); from 1092 fs/ext4/inode.c unsigned from, from 1109 fs/ext4/inode.c if (block_end <= from || block_start >= to) { from 1174 fs/ext4/inode.c unsigned from = pos & (PAGE_SIZE - 1); from 1175 fs/ext4/inode.c unsigned to = from + len; from 1187 fs/ext4/inode.c BUG_ON(from > PAGE_SIZE); from 1189 fs/ext4/inode.c BUG_ON(from > to); from 1200 fs/ext4/inode.c if (block_end <= from || block_start >= to) { from 1221 fs/ext4/inode.c if (block_end > to || block_start < from) from 1223 fs/ext4/inode.c block_start, from); from 1234 fs/ext4/inode.c (block_start < from || block_end > to)) { from 1248 fs/ext4/inode.c page_zero_new_buffers(page, from, to); from 1276 fs/ext4/inode.c unsigned from, to; from 1288 fs/ext4/inode.c from = pos & (PAGE_SIZE - 1); from 1289 fs/ext4/inode.c to = from + len; from 1347 fs/ext4/inode.c from, to, NULL, from 1491 fs/ext4/inode.c unsigned from, unsigned to) from 1500 fs/ext4/inode.c if (block_end > from && block_start < to) { from 1504 fs/ext4/inode.c start = max(from, block_start); from 1528 fs/ext4/inode.c unsigned from, to; from 1534 fs/ext4/inode.c from = pos & (PAGE_SIZE - 1); from 1535 fs/ext4/inode.c to = from + len; from 1550 fs/ext4/inode.c ext4_journalled_zero_new_buffers(handle, page, from, to); from 1554 fs/ext4/inode.c from + copied, to); from 1555 fs/ext4/inode.c ret = ext4_walk_page_buffers(handle, page_buffers(page), from, from 1556 fs/ext4/inode.c from + copied, &partial, from 4002 fs/ext4/inode.c struct address_space *mapping, loff_t from, loff_t length) from 4004 fs/ext4/inode.c ext4_fsblk_t index = from >> PAGE_SHIFT; from 4005 fs/ext4/inode.c unsigned offset = from & (PAGE_SIZE-1); from 4013 fs/ext4/inode.c page = find_or_create_page(mapping, from >> PAGE_SHIFT, from 4080 fs/ext4/inode.c err = ext4_jbd2_inode_add_write(handle, inode, from, from 4098 fs/ext4/inode.c struct address_space *mapping, loff_t from, loff_t length) from 4101 fs/ext4/inode.c unsigned offset = from & (PAGE_SIZE-1); from 4113 fs/ext4/inode.c return iomap_zero_range(inode, from, length, NULL, from 4116 fs/ext4/inode.c return __ext4_block_zero_page_range(handle, mapping, from, length); from 4126 fs/ext4/inode.c struct address_space *mapping, loff_t from) from 4128 fs/ext4/inode.c unsigned offset = from & (PAGE_SIZE-1); from 4140 fs/ext4/inode.c return ext4_block_zero_page_range(handle, mapping, from, length); from 90 fs/ext4/move_extent.c mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count, from 96 fs/ext4/move_extent.c ext4_lblk_t last = from + count; from 97 fs/ext4/move_extent.c while (from < last) { from 98 
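The ->write_iter entries above (ext4_file_write_iter here, and the btrfs, ceph and cifs variants earlier) share one shape: take the inode lock, let generic_write_checks() validate the position and possibly truncate the iterator, then hand whatever remains to a buffered or direct path. A schematic sketch of that shape for a hypothetical filesystem, not a copy of any implementation listed:

    #include <linux/fs.h>
    #include <linux/uio.h>

    static ssize_t example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
    {
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        inode_lock(inode);
        ret = generic_write_checks(iocb, from);       /* may shrink the iter to fit limits */
        if (ret <= 0)
            goto out;                                 /* nothing to write, or an error */
        ret = __generic_file_write_iter(iocb, from);  /* buffered write via the page cache */
    out:
        inode_unlock(inode);
        if (ret > 0)
            ret = generic_write_sync(iocb, ret);      /* honour O_SYNC / O_DSYNC */
        return ret;
    }

The per-filesystem variants differ mainly in what happens between the checks and the copy: ext4 rejects unaligned AIO direct I/O, cifs checks for byte-range lock conflicts, and ceph may fall back to a synchronous write path.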
fs/ext4/move_extent.c *err = get_ext_path(inode, from, &path); from 104 fs/ext4/move_extent.c from += ext4_ext_get_actual_len(ext); from 167 fs/ext4/move_extent.c mext_page_mkuptodate(struct page *page, unsigned from, unsigned to) from 189 fs/ext4/move_extent.c if (block_end <= from || block_start >= to) { from 260 fs/ext4/move_extent.c int from = data_offset_in_page << orig_inode->i_blkbits; from 348 fs/ext4/move_extent.c *err = mext_page_mkuptodate(pagep[0], from, from + replaced_size); from 386 fs/ext4/move_extent.c *err = block_commit_write(pagep[0], from, from + replaced_size); from 280 fs/ext4/namei.c static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to, from 1762 fs/ext4/namei.c dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count, from 1769 fs/ext4/namei.c (from + (map->offs<<2)); from 1867 fs/ext4/super.c if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg)) from 1869 fs/ext4/super.c if (args->from && (m->flags & MOPT_GTE0) && (arg < 0)) from 1923 fs/ext4/super.c if (!args->from) from 2049 fs/ext4/super.c if (!args->from) from 2087 fs/ext4/super.c args[0].to = args[0].from = NULL; from 2523 fs/ext4/xattr.c void *from, size_t n) from 2540 fs/ext4/xattr.c memmove(to, from, n); from 1069 fs/f2fs/data.c int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) from 1078 fs/f2fs/data.c map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from)); from 1091 fs/f2fs/data.c flag = f2fs_force_buffered_io(inode, iocb, from) ? from 1096 fs/f2fs/data.c if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) { from 2927 fs/f2fs/f2fs.h int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); from 3052 fs/f2fs/f2fs.h int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); from 3216 fs/f2fs/f2fs.h int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from); from 3495 fs/f2fs/f2fs.h struct page *ipage, u64 from); from 572 fs/f2fs/file.c static int truncate_partial_data_page(struct inode *inode, u64 from, from 575 fs/f2fs/file.c loff_t offset = from & (PAGE_SIZE - 1); from 576 fs/f2fs/file.c pgoff_t index = from >> PAGE_SHIFT; from 606 fs/f2fs/file.c int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock) from 615 fs/f2fs/file.c trace_f2fs_truncate_blocks_enter(inode, from); from 617 fs/f2fs/file.c free_from = (pgoff_t)F2FS_BLK_ALIGN(from); from 632 fs/f2fs/file.c f2fs_truncate_inline_inode(inode, ipage, from); from 665 fs/f2fs/file.c err = truncate_partial_data_page(inode, from, truncate_page); from 3313 fs/f2fs/file.c static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 3333 fs/f2fs/file.c ret = generic_write_checks(iocb, from); from 3339 fs/f2fs/file.c if (iov_iter_fault_in_readable(from, iov_iter_count(from))) from 3344 fs/f2fs/file.c iov_iter_count(from)) || from 3346 fs/f2fs/file.c f2fs_force_buffered_io(inode, iocb, from)) { from 3371 fs/f2fs/file.c if (!f2fs_force_buffered_io(inode, iocb, from) && from 3372 fs/f2fs/file.c allow_outplace_dio(inode, iocb, from)) from 3376 fs/f2fs/file.c target_size = iocb->ki_pos + iov_iter_count(from); from 3378 fs/f2fs/file.c err = f2fs_preallocate_blocks(iocb, from); from 3387 fs/f2fs/file.c ret = __generic_file_write_iter(iocb, from); from 3400 fs/f2fs/file.c iov_iter_count(from), ret); from 66 fs/f2fs/inline.c struct page *ipage, u64 from) from 70 fs/f2fs/inline.c if (from >= MAX_INLINE_DATA(inode)) from 76 fs/f2fs/inline.c memset(addr + from, 0, MAX_INLINE_DATA(inode) - from); from 79 
fs/f2fs/inline.c if (from == 0) from 1036 fs/f2fs/node.c int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from) from 1046 fs/f2fs/node.c trace_f2fs_truncate_inode_blocks_enter(inode, from); from 1048 fs/f2fs/node.c level = get_node_path(inode, from, offset, noffset); from 413 fs/f2fs/super.c args[0].to = args[0].from = NULL; from 476 fs/f2fs/super.c if (args->from && match_int(args, &arg)) from 511 fs/f2fs/super.c if (args->from && match_int(args, &arg)) from 554 fs/f2fs/super.c if (args->from && match_int(args, &arg)) from 565 fs/f2fs/super.c if (args->from && match_int(args, &arg)) from 575 fs/f2fs/super.c if (args->from && match_int(args, &arg)) from 607 fs/f2fs/super.c if (args->from && match_int(args, &arg)) from 618 fs/f2fs/super.c if (args->from && match_int(args, &arg)) from 625 fs/f2fs/super.c if (args->from && match_int(args, &arg)) from 789 fs/f2fs/super.c if (args->from && match_int(args, &arg)) from 802 fs/f2fs/super.c if (args->from && match_int(args, &arg)) from 408 fs/fat/fat.h extern int fat_block_truncate_page(struct inode *inode, loff_t from); from 339 fs/fat/inode.c int fat_block_truncate_page(struct inode *inode, loff_t from) from 341 fs/fat/inode.c return block_truncate_page(inode->i_mapping, from, fat_get_block); from 983 fs/file.c int f_dupfd(unsigned int from, struct file *file, unsigned flags) from 986 fs/file.c if (from >= rlimit(RLIMIT_NOFILE)) from 988 fs/file.c err = alloc_fd(from, flags); from 101 fs/fuse/cuse.c static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from) from 109 fs/fuse/cuse.c return fuse_direct_io(&io, from, &pos, from 1920 fs/fuse/dev.c static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from) from 1928 fs/fuse/dev.c if (!iter_is_iovec(from)) from 1931 fs/fuse/dev.c fuse_copy_init(&cs, 0, from); from 1933 fs/fuse/dev.c return fuse_dev_do_write(fud, &cs, iov_iter_count(from)); from 1268 fs/fuse/file.c static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) from 1284 fs/fuse/file.c return generic_file_write_iter(iocb, from); from 1292 fs/fuse/file.c err = generic_write_checks(iocb, from); from 1306 fs/fuse/file.c written = generic_file_direct_write(iocb, from); from 1307 fs/fuse/file.c if (written < 0 || !iov_iter_count(from)) from 1312 fs/fuse/file.c written_buffered = fuse_perform_write(iocb, mapping, from, pos); from 1331 fs/fuse/file.c written = fuse_perform_write(iocb, mapping, from, iocb->ki_pos); from 1540 fs/fuse/file.c static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) from 1548 fs/fuse/file.c res = generic_write_checks(iocb, from); from 1551 fs/fuse/file.c res = fuse_direct_IO(iocb, from); from 1553 fs/fuse/file.c res = fuse_direct_io(&io, from, &iocb->ki_pos, from 1579 fs/fuse/file.c static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 1588 fs/fuse/file.c return fuse_cache_write_iter(iocb, from); from 1590 fs/fuse/file.c return fuse_direct_write_iter(iocb, from); from 41 fs/gfs2/aops.c unsigned int from, unsigned int len) from 46 fs/gfs2/aops.c unsigned int to = from + len; from 52 fs/gfs2/aops.c if (end <= from) from 13 fs/gfs2/aops.h unsigned int from, unsigned int len); from 1353 fs/gfs2/bmap.c static int gfs2_block_zero_range(struct inode *inode, loff_t from, from 1356 fs/gfs2/bmap.c return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops); from 770 fs/gfs2/file.c static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from) from 775 fs/gfs2/file.c size_t len = 
iov_iter_count(from); from 797 fs/gfs2/file.c ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL); from 831 fs/gfs2/file.c static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 842 fs/gfs2/file.c gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from)); from 854 fs/gfs2/file.c ret = generic_write_checks(iocb, from); from 870 fs/gfs2/file.c ret = gfs2_file_direct_write(iocb, from); from 871 fs/gfs2/file.c if (ret < 0 || !iov_iter_count(from)) from 876 fs/gfs2/file.c buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops); from 897 fs/gfs2/file.c ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops); from 212 fs/hfs/super.c if (arg->to - arg->from != 4) from 214 fs/hfs/super.c memcpy(result, arg->from, 4); from 66 fs/hfsplus/options.c if (arg->to - arg->from != 4) from 68 fs/hfsplus/options.c memcpy(result, arg->from, 4); from 84 fs/hostfs/hostfs.h extern int make_symlink(const char *from, const char *to); from 90 fs/hostfs/hostfs.h extern int link_file(const char *to, const char *from); from 92 fs/hostfs/hostfs.h extern int rename_file(char *from, char *to); from 93 fs/hostfs/hostfs.h extern int rename2_file(char *from, char *to, unsigned int flags); from 482 fs/hostfs/hostfs_kern.c unsigned from = pos & (PAGE_SIZE - 1); from 486 fs/hostfs/hostfs_kern.c err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied); from 625 fs/hostfs/hostfs_kern.c struct dentry *from) from 630 fs/hostfs/hostfs_kern.c if ((from_name = dentry_name(from)) == NULL) from 277 fs/hostfs/hostfs_user.c int make_symlink(const char *from, const char *to) from 281 fs/hostfs/hostfs_user.c err = symlink(to, from); from 327 fs/hostfs/hostfs_user.c int link_file(const char *to, const char *from) from 331 fs/hostfs/hostfs_user.c err = link(to, from); from 349 fs/hostfs/hostfs_user.c int rename_file(char *from, char *to) from 353 fs/hostfs/hostfs_user.c err = rename(from, to); from 359 fs/hostfs/hostfs_user.c int rename2_file(char *from, char *to, unsigned int flags) from 373 fs/hostfs/hostfs_user.c err = syscall(SYS_renameat2, AT_FDCWD, from, AT_FDCWD, to, flags); from 433 fs/hpfs/dnode.c static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to) from 443 fs/hpfs/dnode.c dno = from; from 477 fs/hpfs/dnode.c hpfs_error(i->i_sb, "move_to_top: got to root_dnode while moving from %08x to %08x", from, to); from 507 fs/hpfs/dnode.c a = hpfs_add_to_dnode(i, to, nde->name, nde->namelen, nde, from); from 54 fs/hpfs/name.c unsigned char *hpfs_translate_name(struct super_block *s, unsigned char *from, from 59 fs/hpfs/name.c if (hpfs_sb(s)->sb_chk >= 2) if (hpfs_is_name_long(from, len) != lng) { from 62 fs/hpfs/name.c pr_cont("%c", from[i]); from 66 fs/hpfs/name.c if (!lc) return from; from 69 fs/hpfs/name.c return from; from 71 fs/hpfs/name.c for (i = 0; i < len; i++) to[i] = locase(hpfs_sb(s)->sb_cp_table,from[i]); from 400 fs/hpfs/super.c char *rhs = args[0].from; from 429 fs/iomap/buffered-io.c iomap_is_partially_uptodate(struct page *page, unsigned long from, from 438 fs/iomap/buffered-io.c len = min_t(unsigned, PAGE_SIZE - from, count); from 441 fs/iomap/buffered-io.c first = from >> inode->i_blkbits; from 442 fs/iomap/buffered-io.c last = (from + len - 1) >> inode->i_blkbits; from 529 fs/iomap/buffered-io.c unsigned poff, unsigned plen, unsigned from, unsigned to, from 536 fs/iomap/buffered-io.c zero_user_segments(page, poff, from, to, poff + plen); from 557 fs/iomap/buffered-io.c unsigned from = offset_in_page(pos), to = from + len, poff, plen; from 569 
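The args[0].from / args[0].to pairs that recur in the mount-option parsers above (btrfs, ceph, ext4, f2fs, hfs, hpfs and others) are substring_t bounds filled in by the kernel's match_token() parser from <linux/parser.h>; match_int() then converts a matched %d via the same bounds. A minimal sketch of that API with invented option names:

    #include <linux/parser.h>
    #include <linux/kernel.h>
    #include <linux/errno.h>

    enum { Opt_label, Opt_size, Opt_err };

    static const match_table_t example_tokens = {
        { Opt_label, "label=%s" },
        { Opt_size,  "size=%d"  },
        { Opt_err,   NULL       },
    };

    static int example_parse_option(char *p)
    {
        substring_t args[MAX_OPT_ARGS];
        int option;

        switch (match_token(p, example_tokens, args)) {
        case Opt_label:
            /* args[0].from .. args[0].to bound the text matched by %s */
            pr_info("label = %.*s\n",
                    (int)(args[0].to - args[0].from), args[0].from);
            return 0;
        case Opt_size:
            if (match_int(&args[0], &option))   /* parses the %d via args[0].from */
                return -EINVAL;
            pr_info("size = %d\n", option);
            return 0;
        default:
            return -EINVAL;
        }
    }

This is why so many of the hits above look like `strcmp(args[0].from, ...)` or `kstrndup(args[0].from, args[0].to - args[0].from, ...)`: the parser hands back pointers into the original option string rather than copies.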
fs/iomap/buffered-io.c if ((from > poff && from < poff + plen) || from 572 fs/iomap/buffered-io.c poff, plen, from, to, iomap); from 41 fs/jfs/ioctl.c static long jfs_map_ext2(unsigned long flags, int from) from 47 fs/jfs/ioctl.c if (from) { from 19 fs/jfs/jfs_unicode.c int jfs_strfromUCS_le(char *to, const __le16 * from, from 28 fs/jfs/jfs_unicode.c for (i = 0; (i < len) && from[i]; i++) { from 31 fs/jfs/jfs_unicode.c codepage->uni2char(le16_to_cpu(from[i]), from 40 fs/jfs/jfs_unicode.c for (i = 0; (i < len) && from[i]; i++) { from 41 fs/jfs/jfs_unicode.c if (unlikely(le16_to_cpu(from[i]) & 0xff00)) { from 48 fs/jfs/jfs_unicode.c le16_to_cpu(from[i])); from 55 fs/jfs/jfs_unicode.c to[i] = (char) (le16_to_cpu(from[i])); from 69 fs/jfs/jfs_unicode.c static int jfs_strtoUCS(wchar_t * to, const unsigned char *from, int len, from 76 fs/jfs/jfs_unicode.c for (i = 0; len && *from; i++, from += charlen, len -= charlen) from 78 fs/jfs/jfs_unicode.c charlen = codepage->char2uni(from, len, &to[i]); from 83 fs/jfs/jfs_unicode.c codepage->charset, *from); from 88 fs/jfs/jfs_unicode.c for (i = 0; (i < len) && from[i]; i++) from 89 fs/jfs/jfs_unicode.c to[i] = (wchar_t) from[i]; from 266 fs/jfs/super.c if (!strcmp(args[0].from, "none")) from 269 fs/jfs/super.c nls_map = load_nls(args[0].from); from 278 fs/jfs/super.c char *resize = args[0].from; from 295 fs/jfs/super.c char *errors = args[0].from; from 335 fs/jfs/super.c char *uid = args[0].from; from 349 fs/jfs/super.c char *gid = args[0].from; from 363 fs/jfs/super.c char *umask = args[0].from; from 397 fs/jfs/super.c char *minblks_trim = args[0].from; from 51 fs/kernfs/dir.c static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to) from 55 fs/kernfs/dir.c while (to->parent && to != from) { from 210 fs/kernfs/dir.c int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from, from 217 fs/kernfs/dir.c ret = kernfs_path_from_node_locked(to, from, buf, buflen); from 466 fs/libfs.c unsigned from = pos & (PAGE_SIZE - 1); from 468 fs/libfs.c zero_user_segments(page, 0, from, from + len, PAGE_SIZE); from 506 fs/libfs.c unsigned from = pos & (PAGE_SIZE - 1); from 508 fs/libfs.c zero_user(page, from + copied, len - copied); from 646 fs/libfs.c const void *from, size_t available) from 657 fs/libfs.c ret = copy_to_user(to, from + pos, count); from 681 fs/libfs.c const void __user *from, size_t count) from 692 fs/libfs.c res = copy_from_user(to + pos, from, count); from 716 fs/libfs.c const void *from, size_t available) from 726 fs/libfs.c memcpy(to, from + pos, count); from 18 fs/minix/itree_common.c static inline int verify_chain(Indirect *from, Indirect *to) from 20 fs/minix/itree_common.c while (from <= to && from->key == *from->p) from 21 fs/minix/itree_common.c from++; from 22 fs/minix/itree_common.c return (from > to); from 4133 fs/namei.c struct filename *from; from 4138 fs/namei.c from = getname(oldname); from 4139 fs/namei.c if (IS_ERR(from)) from 4140 fs/namei.c return PTR_ERR(from); from 4147 fs/namei.c error = security_path_symlink(&path, dentry, from->name); from 4149 fs/namei.c error = vfs_symlink(path.dentry->d_inode, dentry, from->name); from 4156 fs/namei.c putname(from); from 4520 fs/namei.c struct filename *from; from 4540 fs/namei.c from = filename_parentat(olddfd, getname(oldname), lookup_flags, from 4542 fs/namei.c if (IS_ERR(from)) { from 4543 fs/namei.c error = PTR_ERR(from); from 4644 fs/namei.c putname(from); from 2988 fs/namespace.c static long exact_copy_from_user(void *to, const void __user * from, 
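The fs/libfs.c hits above copy between a kernel buffer and userspace at a file position (`copy_to_user(to, from + pos, count)` and the reverse), which is the pattern the simple_read_from_buffer() helper wraps. A hedged sketch of a read op built on it; the message and function names are invented:

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    static const char example_msg[] = "hello from a kernel buffer\n";

    /* Exposes example_msg to userspace, clamping count and advancing *ppos. */
    static ssize_t example_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
    {
        return simple_read_from_buffer(buf, count, ppos,
                                       example_msg, sizeof(example_msg) - 1);
    }

Reads past the end of the buffer return 0 (EOF), and short reads advance *ppos by however many bytes were actually copied.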
from 2992 fs/namespace.c const char __user *f = from; from 2995 fs/namespace.c if (!access_ok(from, n)) from 593 fs/nfs/file.c ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) from 605 fs/nfs/file.c return nfs_file_direct_write(iocb, from); from 608 fs/nfs/file.c file, iov_iter_count(from), (long long) iocb->ki_pos); from 624 fs/nfs/file.c result = generic_write_checks(iocb, from); from 627 fs/nfs/file.c result = generic_perform_write(file, from, iocb->ki_pos); from 87 fs/nilfs2/dir.c static int nilfs_prepare_chunk(struct page *page, unsigned int from, from 90 fs/nilfs2/dir.c loff_t pos = page_offset(page) + from; from 92 fs/nilfs2/dir.c return __block_write_begin(page, pos, to - from, nilfs_get_block); from 97 fs/nilfs2/dir.c unsigned int from, unsigned int to) from 100 fs/nilfs2/dir.c loff_t pos = page_offset(page) + from; from 101 fs/nilfs2/dir.c unsigned int len = to - from; from 105 fs/nilfs2/dir.c nr_dirty = nilfs_page_count_clean_buffers(page, from, to); from 420 fs/nilfs2/dir.c unsigned int from = (char *)de - (char *)page_address(page); from 421 fs/nilfs2/dir.c unsigned int to = from + nilfs_rec_len_from_disk(de->rec_len); from 426 fs/nilfs2/dir.c err = nilfs_prepare_chunk(page, from, to); from 430 fs/nilfs2/dir.c nilfs_commit_chunk(page, mapping, from, to); from 451 fs/nilfs2/dir.c unsigned int from, to; from 504 fs/nilfs2/dir.c from = (char *)de - (char *)page_address(page); from 505 fs/nilfs2/dir.c to = from + rec_len; from 506 fs/nilfs2/dir.c err = nilfs_prepare_chunk(page, from, to); from 521 fs/nilfs2/dir.c nilfs_commit_chunk(page, page->mapping, from, to); from 543 fs/nilfs2/dir.c unsigned int from, to; from 547 fs/nilfs2/dir.c from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1); from 549 fs/nilfs2/dir.c de = (struct nilfs_dir_entry *)(kaddr + from); from 562 fs/nilfs2/dir.c from = (char *)pde - (char *)page_address(page); from 564 fs/nilfs2/dir.c err = nilfs_prepare_chunk(page, from, to); from 567 fs/nilfs2/dir.c pde->rec_len = nilfs_rec_len_to_disk(to - from); from 569 fs/nilfs2/dir.c nilfs_commit_chunk(page, mapping, from, to); from 692 fs/nilfs2/inode.c unsigned long from) from 706 fs/nilfs2/inode.c if (b < from) from 709 fs/nilfs2/inode.c b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from); from 425 fs/nilfs2/page.c unsigned int from, unsigned int to) from 435 fs/nilfs2/page.c if (block_end > from && block_start < to && !buffer_dirty(bh)) from 738 fs/nilfs2/super.c if (strcmp(args[0].from, "relaxed") == 0) from 741 fs/nilfs2/super.c else if (strcmp(args[0].from, "strict") == 0) from 1204 fs/nilfs2/super.c err = kstrtoull(arg->from, 0, &val); from 318 fs/ntfs/file.c struct iov_iter *from) from 333 fs/ntfs/file.c iov_iter_count(from)); from 334 fs/ntfs/file.c err = generic_write_checks(iocb, from); from 381 fs/ntfs/file.c end = (pos + iov_iter_count(from) + vol->cluster_size_mask) & from 409 fs/ntfs/file.c iov_iter_truncate(from, ll - pos); from 425 fs/ntfs/file.c iov_iter_truncate(from, ll - pos); from 1925 fs/ntfs/file.c static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 1935 fs/ntfs/file.c err = ntfs_prepare_file_for_write(iocb, from); from 1936 fs/ntfs/file.c if (iov_iter_count(from) && !err) from 1937 fs/ntfs/file.c written = ntfs_perform_write(file, from, iocb->ki_pos); from 6806 fs/ocfs2/alloc.c unsigned int from, unsigned int to, from 6810 fs/ocfs2/alloc.c loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from; from 6811 fs/ocfs2/alloc.c loff_t length = to - from; from 6813 fs/ocfs2/alloc.c 
ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0); from 6818 fs/ocfs2/alloc.c zero_user_segment(page, from, to); from 6826 fs/ocfs2/alloc.c from, to, &partial, from 6849 fs/ocfs2/alloc.c unsigned int from, to = PAGE_SIZE; from 6861 fs/ocfs2/alloc.c from = start & (PAGE_SIZE - 1); from 6865 fs/ocfs2/alloc.c BUG_ON(from > PAGE_SIZE); from 6868 fs/ocfs2/alloc.c ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, from 262 fs/ocfs2/alloc.h unsigned int from, unsigned int to, from 427 fs/ocfs2/aops.c unsigned from, from 445 fs/ocfs2/aops.c if (block_end <= from || block_start >= to) { from 555 fs/ocfs2/aops.c unsigned from, unsigned to) from 564 fs/ocfs2/aops.c if (from || to) { from 565 fs/ocfs2/aops.c if (from > cluster_start) from 566 fs/ocfs2/aops.c memset(kaddr + cluster_start, 0, from - cluster_start); from 605 fs/ocfs2/aops.c struct inode *inode, unsigned int from, from 627 fs/ocfs2/aops.c if (block_start >= to || block_end <= from) { from 651 fs/ocfs2/aops.c (block_start < from || block_end > to)) { from 679 fs/ocfs2/aops.c if (block_end <= from) from 888 fs/ocfs2/aops.c static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to) from 903 fs/ocfs2/aops.c if (block_end > from && block_start < to) { from 907 fs/ocfs2/aops.c start = max(from, block_start); from 933 fs/ocfs2/aops.c unsigned from = user_pos & (PAGE_SIZE - 1), from 938 fs/ocfs2/aops.c ocfs2_zero_new_buffers(wc->w_target_page, from, to); from 948 fs/ocfs2/aops.c block_commit_write(tmppage, from, to); from 1967 fs/ocfs2/aops.c unsigned from, to, start = pos & (PAGE_SIZE - 1); from 2010 fs/ocfs2/aops.c from = wc->w_target_from; from 2013 fs/ocfs2/aops.c BUG_ON(from > PAGE_SIZE || from 2015 fs/ocfs2/aops.c to < from); from 2022 fs/ocfs2/aops.c from = 0; from 2030 fs/ocfs2/aops.c from; from 2031 fs/ocfs2/aops.c loff_t length = to - from; from 2035 fs/ocfs2/aops.c block_commit_write(tmppage, from, to); from 15 fs/ocfs2/aops.h unsigned from, from 19 fs/ocfs2/aops.h struct inode *inode, unsigned int from, from 26 fs/ocfs2/aops.h unsigned from, from 1808 fs/ocfs2/dlm/dlmrecovery.c u8 from = O2NM_MAX_NODES; from 1815 fs/ocfs2/dlm/dlmrecovery.c if (dlm_is_dummy_lock(dlm, ml, &from)) { from 1820 fs/ocfs2/dlm/dlmrecovery.c from); from 1822 fs/ocfs2/dlm/dlmrecovery.c dlm_lockres_set_refmap_bit(dlm, res, from); from 2276 fs/ocfs2/file.c struct iov_iter *from) from 2281 fs/ocfs2/file.c size_t count = iov_iter_count(from); from 2297 fs/ocfs2/file.c (unsigned int)from->nr_segs); /* GRRRRR */ from 2351 fs/ocfs2/file.c ret = generic_write_checks(iocb, from); from 2377 fs/ocfs2/file.c written = __generic_file_write_iter(iocb, from); from 2913 fs/ocfs2/refcounttree.c unsigned int from, to; from 2936 fs/ocfs2/refcounttree.c from = offset & (PAGE_SIZE - 1); from 2974 fs/ocfs2/refcounttree.c from, to, &partial, from 2983 fs/ocfs2/refcounttree.c handle, from, to, from 1377 fs/ocfs2/super.c if (((args[0].to - args[0].from) != from 1379 fs/ocfs2/super.c (strnlen(args[0].from, from 1387 fs/ocfs2/super.c memcpy(mopt->cluster_stack, args[0].from, from 461 fs/orangefs/inode.c unsigned from = pos & (PAGE_SIZE - 1); from 463 fs/orangefs/inode.c zero_user(page, from + copied, len - copied); from 468 fs/orangefs/inode.c zero_user_segment(page, from + copied, PAGE_SIZE); from 520 fs/orangefs/orangefs-bufmap.c struct orangefs_bufmap_desc *from; from 523 fs/orangefs/orangefs-bufmap.c from = &__orangefs_bufmap->desc_array[buffer_index]; from 530 fs/orangefs/orangefs-bufmap.c struct page *page = from->page_array[i]; from 546 
fs/orangefs/orangefs-bufmap.c struct orangefs_bufmap_desc *from; from 549 fs/orangefs/orangefs-bufmap.c from = &__orangefs_bufmap->desc_array[buffer_index]; from 550 fs/orangefs/orangefs-bufmap.c page_from = kmap_atomic(from->page_array[slot_index]); from 381 fs/overlayfs/overlayfs.h static inline void ovl_copyattr(struct inode *from, struct inode *to) from 383 fs/overlayfs/overlayfs.h to->i_uid = from->i_uid; from 384 fs/overlayfs/overlayfs.h to->i_gid = from->i_gid; from 385 fs/overlayfs/overlayfs.h to->i_mode = from->i_mode; from 386 fs/overlayfs/overlayfs.h to->i_atime = from->i_atime; from 387 fs/overlayfs/overlayfs.h to->i_mtime = from->i_mtime; from 388 fs/overlayfs/overlayfs.h to->i_ctime = from->i_ctime; from 389 fs/overlayfs/overlayfs.h i_size_write(to, i_size_read(from)); from 392 fs/overlayfs/overlayfs.h static inline void ovl_copyflags(struct inode *from, struct inode *to) from 396 fs/overlayfs/overlayfs.h inode_set_flags(to, from->i_flags & mask, mask); from 379 fs/pipe.c pipe_write(struct kiocb *iocb, struct iov_iter *from) from 385 fs/pipe.c size_t total_len = iov_iter_count(from); from 413 fs/pipe.c ret = copy_page_from_iter(buf->page, offset, chars, from); from 420 fs/pipe.c if (!iov_iter_count(from)) from 455 fs/pipe.c copied = copy_page_from_iter(page, 0, PAGE_SIZE, from); from 456 fs/pipe.c if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) { from 476 fs/pipe.c if (!iov_iter_count(from)) from 667 fs/posix_acl.c struct user_namespace *to, struct user_namespace *from, from 692 fs/posix_acl.c uid = make_kuid(from, le32_to_cpu(entry->e_id)); from 696 fs/posix_acl.c gid = make_kgid(from, le32_to_cpu(entry->e_id)); from 29 fs/proc/proc_tty.c dev_t from, int num) from 34 fs/proc/proc_tty.c seq_printf(m, "%3d %d-%d ", MAJOR(from), MINOR(from), from 35 fs/proc/proc_tty.c MINOR(from) + num - 1); from 37 fs/proc/proc_tty.c seq_printf(m, "%3d %7d ", MAJOR(from), MINOR(from)); from 72 fs/proc/proc_tty.c dev_t from = MKDEV(p->major, p->minor_start); from 73 fs/proc/proc_tty.c dev_t to = from + p->num; from 95 fs/proc/proc_tty.c while (MAJOR(from) < MAJOR(to)) { from 96 fs/proc/proc_tty.c dev_t next = MKDEV(MAJOR(from)+1, 0); from 97 fs/proc/proc_tty.c show_tty_range(m, p, from, next - from); from 98 fs/proc/proc_tty.c from = next; from 100 fs/proc/proc_tty.c if (from != to) from 101 fs/proc/proc_tty.c show_tty_range(m, p, from, to - from); from 188 fs/proc/vmcore.c unsigned long from, unsigned long pfn, from 192 fs/proc/vmcore.c return remap_pfn_range(vma, from, pfn, size, prot); from 488 fs/proc/vmcore.c unsigned long from, unsigned long pfn, from 509 fs/proc/vmcore.c if (remap_oldmem_pfn_range(vma, from + len, from 516 fs/proc/vmcore.c if (remap_oldmem_pfn_range(vma, from + len, from 527 fs/proc/vmcore.c if (remap_oldmem_pfn_range(vma, from + len, pos_start, from 533 fs/proc/vmcore.c do_munmap(vma->vm_mm, from, len, NULL); from 538 fs/proc/vmcore.c unsigned long from, unsigned long pfn, from 546 fs/proc/vmcore.c return remap_oldmem_pfn_checked(vma, from, pfn, size, prot); from 548 fs/proc/vmcore.c return remap_oldmem_pfn_range(vma, from, pfn, size, prot); from 177 fs/reiserfs/file.c unsigned from, unsigned to) from 208 fs/reiserfs/file.c if (block_end <= from || block_start >= to) { from 375 fs/reiserfs/fix_node.c int from, int from_bytes, from 431 fs/reiserfs/fix_node.c i = ((to - from) * (KEY_SIZE + DC_SIZE) + DC_SIZE); from 442 fs/reiserfs/fix_node.c start_item = from; from 555 fs/reiserfs/fix_node.c ((from == split_item_num from 583 fs/reiserfs/fix_node.c ((from == 
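pipe_write() above fills pages straight from the caller's iov_iter with copy_page_from_iter(), checking for short copies before committing the buffer. A reduced sketch of that step for a caller that already owns a page; this is not the pipe code itself:

    #include <linux/kernel.h>
    #include <linux/uio.h>
    #include <linux/mm.h>
    #include <linux/errno.h>

    /* Hypothetical helper: copy up to one page of user data into @page. */
    static ssize_t example_fill_page(struct page *page, struct iov_iter *from)
    {
        size_t want   = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
        size_t copied = copy_page_from_iter(page, 0, want, from);

        if (copied < want && iov_iter_count(from))
            return -EFAULT;     /* faulted mid-copy with data still pending */
        return copied;
    }

The `copied < PAGE_SIZE && iov_iter_count(from)` test in pipe_write() serves the same purpose: a short copy with bytes still left in the iterator means the user buffer faulted, not that the data ran out.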
split_item_num from 277 fs/reiserfs/ibalance.c static void internal_delete_childs(struct buffer_info *cur_bi, int from, int n) from 281 fs/reiserfs/ibalance.c i_from = (from == 0) ? from : from - 1; from 287 fs/reiserfs/ibalance.c internal_delete_pointers_items(cur_bi, from, i_from, n); from 25 fs/reiserfs/inode.c unsigned from, unsigned to); from 2835 fs/reiserfs/inode.c int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len) from 2857 fs/reiserfs/inode.c ret = __block_write_begin(page, from, len, reiserfs_get_block); from 2999 fs/reiserfs/inode.c unsigned from, unsigned to) from 3015 fs/reiserfs/inode.c reiserfs_commit_page(inode, page, from, to); from 162 fs/reiserfs/ioctl.c unsigned from, unsigned to); from 583 fs/reiserfs/item_ops.c int from, to; from 588 fs/reiserfs/item_ops.c from = 0; from 590 fs/reiserfs/item_ops.c from = dir_u->entry_count - count; from 591 fs/reiserfs/item_ops.c to = from + count - 1; from 593 fs/reiserfs/item_ops.c for (i = from; i <= to; i++) from 17 fs/reiserfs/lbalance.c int item_num, int from, int copy_count) from 41 fs/reiserfs/lbalance.c copy_records_len = (from ? deh_location(&deh[from - 1]) : from 43 fs/reiserfs/lbalance.c deh_location(&deh[from + copy_count - 1]); from 46 fs/reiserfs/lbalance.c deh_location(&deh[from + copy_count - 1]); from 82 fs/reiserfs/lbalance.c if (from < ih_entry_count(ih)) { from 84 fs/reiserfs/lbalance.c deh_offset(&deh[from])); from 123 fs/reiserfs/lbalance.c : 0, copy_count, deh + from, records, from 1073 fs/reiserfs/lbalance.c struct item_head *ih, int from, int del_count) from 1087 fs/reiserfs/lbalance.c RFALSE(ih_entry_count(ih) < from + del_count, from 1089 fs/reiserfs/lbalance.c ih_entry_count(ih), from, del_count); from 1105 fs/reiserfs/lbalance.c (from ? deh_location(&deh[from - 1]) : ih_item_len(ih)); from 1107 fs/reiserfs/lbalance.c deh_location(&deh[from + del_count - 1]); from 1111 fs/reiserfs/lbalance.c for (i = ih_entry_count(ih) - 1; i > from + del_count - 1; i--) from 1116 fs/reiserfs/lbalance.c for (i = 0; i < from; i++) from 1124 fs/reiserfs/lbalance.c memmove((char *)(deh + from), from 1125 fs/reiserfs/lbalance.c deh + from + del_count, from 1126 fs/reiserfs/lbalance.c prev_record - cut_records_len - (char *)(deh + from + from 447 fs/reiserfs/prints.c int from, to; from 455 fs/reiserfs/prints.c from = 0; from 458 fs/reiserfs/prints.c from = first; from 464 fs/reiserfs/prints.c dc = B_N_CHILD(bh, from); from 465 fs/reiserfs/prints.c reiserfs_printk("PTR %d: %y ", from, dc); from 467 fs/reiserfs/prints.c for (i = from, key = internal_key(bh, from), dc++; i < to; from 483 fs/reiserfs/prints.c int from, to; from 505 fs/reiserfs/prints.c from = 0; from 507 fs/reiserfs/prints.c from = first; from 514 fs/reiserfs/prints.c ih += from; from 519 fs/reiserfs/prints.c for (i = from; i < to; i++, ih++) { from 2641 fs/reiserfs/reiserfs.h int (*part_size) (struct virtual_item * vi, int from, int to); from 2655 fs/reiserfs/reiserfs.h #define op_part_size(vi,from,to) item_ops[(vi)->vi_index]->part_size (vi, from, to) from 2919 fs/reiserfs/reiserfs.h unsigned from, unsigned to); from 2968 fs/reiserfs/reiserfs.h const struct item_head *from); from 2973 fs/reiserfs/reiserfs.h extern void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from); from 2996 fs/reiserfs/reiserfs.h const struct reiserfs_key *from) from 2998 fs/reiserfs/reiserfs.h memcpy(to, from, KEY_SIZE); from 3107 fs/reiserfs/reiserfs.h int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len); from 31 
fs/reiserfs/stree.c const struct item_head *from) from 33 fs/reiserfs/stree.c memcpy(to, from, IH_SIZE); from 115 fs/reiserfs/stree.c inline void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from) from 118 fs/reiserfs/stree.c to->on_disk_key.k_dir_id = le32_to_cpu(from->k_dir_id); from 119 fs/reiserfs/stree.c to->on_disk_key.k_objectid = le32_to_cpu(from->k_objectid); from 122 fs/reiserfs/stree.c version = le_key_version(from); from 124 fs/reiserfs/stree.c to->on_disk_key.k_offset = le_key_k_offset(version, from); from 125 fs/reiserfs/stree.c to->on_disk_key.k_type = le_key_k_type(version, from); from 470 fs/reiserfs/xattr.c unsigned from, unsigned to); from 99 fs/seq_file.c m->count = m->from = 0; from 124 fs/seq_file.c m->from = offset - pos; from 125 fs/seq_file.c m->count -= m->from; from 211 fs/seq_file.c err = copy_to_user(buf, m->buf + m->from, n); from 215 fs/seq_file.c m->from += n; from 223 fs/seq_file.c m->from = 0; from 282 fs/seq_file.c m->from = n; from 701 fs/splice.c struct iov_iter from; from 745 fs/splice.c iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left); from 746 fs/splice.c ret = vfs_iter_write(out, &from, &sd.pos, 0); from 1207 fs/splice.c static int iter_to_pipe(struct iov_iter *from, from 1219 fs/splice.c while (iov_iter_count(from) && !failed) { from 1225 fs/splice.c copied = iov_iter_get_pages(from, pages, ~0UL, 16, &start); from 1241 fs/splice.c iov_iter_advance(from, ret); from 110 fs/sysv/inode.c unsigned char * from, unsigned char * to) from 113 fs/sysv/inode.c to[0] = from[0]; from 115 fs/sysv/inode.c to[2] = from[1]; from 116 fs/sysv/inode.c to[3] = from[2]; from 118 fs/sysv/inode.c to[0] = from[0]; from 119 fs/sysv/inode.c to[1] = from[1]; from 120 fs/sysv/inode.c to[2] = from[2]; from 124 fs/sysv/inode.c to[1] = from[0]; from 125 fs/sysv/inode.c to[2] = from[1]; from 126 fs/sysv/inode.c to[3] = from[2]; from 131 fs/sysv/inode.c unsigned char * from, unsigned char * to) from 134 fs/sysv/inode.c to[0] = from[0]; from 135 fs/sysv/inode.c to[1] = from[2]; from 136 fs/sysv/inode.c to[2] = from[3]; from 138 fs/sysv/inode.c to[0] = from[0]; from 139 fs/sysv/inode.c to[1] = from[1]; from 140 fs/sysv/inode.c to[2] = from[2]; from 142 fs/sysv/inode.c to[0] = from[1]; from 143 fs/sysv/inode.c to[1] = from[2]; from 144 fs/sysv/inode.c to[2] = from[3]; from 73 fs/sysv/itree.c static inline int verify_chain(Indirect *from, Indirect *to) from 75 fs/sysv/itree.c while (from <= to && from->key == *from->p) from 76 fs/sysv/itree.c from++; from 77 fs/sysv/itree.c return (from > to); from 2550 fs/ubifs/debug.c unsigned int from, to, ffs = chance(1, 2); from 2553 fs/ubifs/debug.c from = prandom_u32() % len; from 2555 fs/ubifs/debug.c to = min(len, ALIGN(from + 1, c->max_write_size)); from 2557 fs/ubifs/debug.c ubifs_warn(c, "filled bytes %u-%u with %s", from, to - 1, from 2561 fs/ubifs/debug.c memset(p + from, 0xFF, to - from); from 2563 fs/ubifs/debug.c prandom_bytes(p + from, to - from); from 1438 fs/ubifs/file.c static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from) from 1444 fs/ubifs/file.c return generic_file_write_iter(iocb, from); from 418 fs/ubifs/key.h static inline void key_read(const struct ubifs_info *c, const void *from, from 421 fs/ubifs/key.h const union ubifs_key *f = from; from 434 fs/ubifs/key.h const union ubifs_key *from, void *to) from 438 fs/ubifs/key.h t->j32[0] = cpu_to_le32(from->u32[0]); from 439 fs/ubifs/key.h t->j32[1] = cpu_to_le32(from->u32[1]); from 450 fs/ubifs/key.h const union ubifs_key *from, 
void *to) from 454 fs/ubifs/key.h t->j32[0] = cpu_to_le32(from->u32[0]); from 455 fs/ubifs/key.h t->j32[1] = cpu_to_le32(from->u32[1]); from 465 fs/ubifs/key.h const union ubifs_key *from, union ubifs_key *to) from 467 fs/ubifs/key.h to->u64[0] = from->u64[0]; from 362 fs/ubifs/lpt_commit.c int lnum, offs, len, from, err, wlen, alen, done_ltab, done_lsave; from 371 fs/ubifs/lpt_commit.c from = offs; from 402 fs/ubifs/lpt_commit.c wlen = offs - from; from 406 fs/ubifs/lpt_commit.c err = ubifs_leb_write(c, lnum, buf + from, from, from 415 fs/ubifs/lpt_commit.c offs = from = 0; from 462 fs/ubifs/lpt_commit.c wlen = offs - from; from 465 fs/ubifs/lpt_commit.c err = ubifs_leb_write(c, lnum, buf + from, from, alen); from 472 fs/ubifs/lpt_commit.c offs = from = 0; from 488 fs/ubifs/lpt_commit.c wlen = offs - from; from 491 fs/ubifs/lpt_commit.c err = ubifs_leb_write(c, lnum, buf + from, from, alen); from 498 fs/ubifs/lpt_commit.c offs = from = 0; from 511 fs/ubifs/lpt_commit.c wlen = offs - from; from 514 fs/ubifs/lpt_commit.c err = ubifs_leb_write(c, lnum, buf + from, from, alen); from 1095 fs/ubifs/super.c c->auth_key_name = kstrdup(args[0].from, GFP_KERNEL); from 1100 fs/ubifs/super.c c->auth_hash_name = kstrdup(args[0].from, GFP_KERNEL); from 1638 fs/ubifs/ubifs.h static inline void ubifs_copy_hash(const struct ubifs_info *c, const u8 *from, from 1642 fs/ubifs/ubifs.h memcpy(to, from, c->hash_len); from 136 fs/udf/file.c static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 146 fs/udf/file.c retval = generic_write_checks(iocb, from); from 152 fs/udf/file.c loff_t end = iocb->ki_pos + iov_iter_count(from); from 169 fs/udf/file.c retval = __generic_file_write_iter(iocb, from); from 571 fs/udf/super.c uopt->nls_map = load_nls(args[0].from); from 32 fs/udf/symlink.c static int udf_pc_to_char(struct super_block *sb, unsigned char *from, from 43 fs/udf/symlink.c pc = (struct pathComponent *)(from + elen); from 500 fs/ufs/dir.c unsigned from = ((char*)dir - kaddr) & ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1); from 504 fs/ufs/dir.c struct ufs_dir_entry *de = (struct ufs_dir_entry *) (kaddr + from); from 525 fs/ufs/dir.c from = (char*)pde - (char*)page_address(page); from 527 fs/ufs/dir.c pos = page_offset(page) + from; from 529 fs/ufs/dir.c err = ufs_prepare_chunk(page, pos, to - from); from 532 fs/ufs/dir.c pde->d_reclen = cpu_to_fs16(sb, to - from); from 534 fs/ufs/dir.c err = ufs_commit_chunk(page, pos, to - from); from 89 fs/ufs/inode.c Indirect *from, Indirect *to) from 97 fs/ufs/inode.c for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++) from 105 fs/ufs/inode.c Indirect *from, Indirect *to) from 113 fs/ufs/inode.c for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++) from 876 fs/ufs/inode.c static inline void free_data(struct to_free *ctx, u64 from, unsigned count) from 878 fs/ufs/inode.c if (ctx->count && ctx->to != from) { from 883 fs/ufs/inode.c ctx->to = from + count; from 1009 fs/ufs/inode.c static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth) from 1016 fs/ufs/inode.c for (i = from; i < uspi->s_apb ; i++) { from 1030 fs/ufs/inode.c for (i = from; i < uspi->s_apb; i++) { from 766 fs/userfaultfd.c unsigned long from, unsigned long to, from 783 fs/userfaultfd.c ewq.msg.arg.remap.from = from; from 95 fs/xfs/libxfs/xfs_attr_leaf.c struct xfs_attr_leafblock *from) from 99 fs/xfs/libxfs/xfs_attr_leaf.c if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) { from 100 fs/xfs/libxfs/xfs_attr_leaf.c 
hdr3 = (struct xfs_attr3_leaf_hdr *) from; from 103 fs/xfs/libxfs/xfs_attr_leaf.c to->firstused = be16_to_cpu(from->hdr.firstused); from 122 fs/xfs/libxfs/xfs_attr_leaf.c struct xfs_attr3_icleaf_hdr *from) from 128 fs/xfs/libxfs/xfs_attr_leaf.c ASSERT(from->firstused != XFS_ATTR3_LEAF_NULLOFF); from 135 fs/xfs/libxfs/xfs_attr_leaf.c firstused = from->firstused; from 137 fs/xfs/libxfs/xfs_attr_leaf.c ASSERT(from->firstused == geo->blksize); from 141 fs/xfs/libxfs/xfs_attr_leaf.c if (from->magic == XFS_ATTR3_LEAF_MAGIC) { from 153 fs/xfs/libxfs/xfs_attr_leaf.c struct xfs_attr_leafblock *from) from 157 fs/xfs/libxfs/xfs_attr_leaf.c ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) || from 158 fs/xfs/libxfs/xfs_attr_leaf.c from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)); from 160 fs/xfs/libxfs/xfs_attr_leaf.c if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) { from 161 fs/xfs/libxfs/xfs_attr_leaf.c struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)from; from 168 fs/xfs/libxfs/xfs_attr_leaf.c xfs_attr3_leaf_firstused_from_disk(geo, to, from); from 177 fs/xfs/libxfs/xfs_attr_leaf.c to->forw = be32_to_cpu(from->hdr.info.forw); from 178 fs/xfs/libxfs/xfs_attr_leaf.c to->back = be32_to_cpu(from->hdr.info.back); from 179 fs/xfs/libxfs/xfs_attr_leaf.c to->magic = be16_to_cpu(from->hdr.info.magic); from 180 fs/xfs/libxfs/xfs_attr_leaf.c to->count = be16_to_cpu(from->hdr.count); from 181 fs/xfs/libxfs/xfs_attr_leaf.c to->usedbytes = be16_to_cpu(from->hdr.usedbytes); from 182 fs/xfs/libxfs/xfs_attr_leaf.c xfs_attr3_leaf_firstused_from_disk(geo, to, from); from 183 fs/xfs/libxfs/xfs_attr_leaf.c to->holes = from->hdr.holes; from 186 fs/xfs/libxfs/xfs_attr_leaf.c to->freemap[i].base = be16_to_cpu(from->hdr.freemap[i].base); from 187 fs/xfs/libxfs/xfs_attr_leaf.c to->freemap[i].size = be16_to_cpu(from->hdr.freemap[i].size); from 195 fs/xfs/libxfs/xfs_attr_leaf.c struct xfs_attr3_icleaf_hdr *from) from 199 fs/xfs/libxfs/xfs_attr_leaf.c ASSERT(from->magic == XFS_ATTR_LEAF_MAGIC || from 200 fs/xfs/libxfs/xfs_attr_leaf.c from->magic == XFS_ATTR3_LEAF_MAGIC); from 202 fs/xfs/libxfs/xfs_attr_leaf.c if (from->magic == XFS_ATTR3_LEAF_MAGIC) { from 205 fs/xfs/libxfs/xfs_attr_leaf.c hdr3->info.hdr.forw = cpu_to_be32(from->forw); from 206 fs/xfs/libxfs/xfs_attr_leaf.c hdr3->info.hdr.back = cpu_to_be32(from->back); from 207 fs/xfs/libxfs/xfs_attr_leaf.c hdr3->info.hdr.magic = cpu_to_be16(from->magic); from 208 fs/xfs/libxfs/xfs_attr_leaf.c hdr3->count = cpu_to_be16(from->count); from 209 fs/xfs/libxfs/xfs_attr_leaf.c hdr3->usedbytes = cpu_to_be16(from->usedbytes); from 210 fs/xfs/libxfs/xfs_attr_leaf.c xfs_attr3_leaf_firstused_to_disk(geo, to, from); from 211 fs/xfs/libxfs/xfs_attr_leaf.c hdr3->holes = from->holes; from 215 fs/xfs/libxfs/xfs_attr_leaf.c hdr3->freemap[i].base = cpu_to_be16(from->freemap[i].base); from 216 fs/xfs/libxfs/xfs_attr_leaf.c hdr3->freemap[i].size = cpu_to_be16(from->freemap[i].size); from 220 fs/xfs/libxfs/xfs_attr_leaf.c to->hdr.info.forw = cpu_to_be32(from->forw); from 221 fs/xfs/libxfs/xfs_attr_leaf.c to->hdr.info.back = cpu_to_be32(from->back); from 222 fs/xfs/libxfs/xfs_attr_leaf.c to->hdr.info.magic = cpu_to_be16(from->magic); from 223 fs/xfs/libxfs/xfs_attr_leaf.c to->hdr.count = cpu_to_be16(from->count); from 224 fs/xfs/libxfs/xfs_attr_leaf.c to->hdr.usedbytes = cpu_to_be16(from->usedbytes); from 225 fs/xfs/libxfs/xfs_attr_leaf.c xfs_attr3_leaf_firstused_to_disk(geo, to, from); from 226 fs/xfs/libxfs/xfs_attr_leaf.c to->hdr.holes = 
from->holes; from 230 fs/xfs/libxfs/xfs_attr_leaf.c to->hdr.freemap[i].base = cpu_to_be16(from->freemap[i].base); from 231 fs/xfs/libxfs/xfs_attr_leaf.c to->hdr.freemap[i].size = cpu_to_be16(from->freemap[i].size); from 92 fs/xfs/libxfs/xfs_attr_leaf.h struct xfs_attr_leafblock *from); from 95 fs/xfs/libxfs/xfs_attr_leaf.h struct xfs_attr3_icleaf_hdr *from); from 112 fs/xfs/libxfs/xfs_da_format.c uint8_t *from) from 115 fs/xfs/libxfs/xfs_da_format.c return get_unaligned_be64(from) & 0x00ffffffffffffffULL; from 117 fs/xfs/libxfs/xfs_da_format.c return get_unaligned_be32(from); from 435 fs/xfs/libxfs/xfs_da_format.c struct xfs_dir2_leaf *from) from 437 fs/xfs/libxfs/xfs_da_format.c to->forw = be32_to_cpu(from->hdr.info.forw); from 438 fs/xfs/libxfs/xfs_da_format.c to->back = be32_to_cpu(from->hdr.info.back); from 439 fs/xfs/libxfs/xfs_da_format.c to->magic = be16_to_cpu(from->hdr.info.magic); from 440 fs/xfs/libxfs/xfs_da_format.c to->count = be16_to_cpu(from->hdr.count); from 441 fs/xfs/libxfs/xfs_da_format.c to->stale = be16_to_cpu(from->hdr.stale); from 450 fs/xfs/libxfs/xfs_da_format.c struct xfs_dir3_icleaf_hdr *from) from 452 fs/xfs/libxfs/xfs_da_format.c ASSERT(from->magic == XFS_DIR2_LEAF1_MAGIC || from 453 fs/xfs/libxfs/xfs_da_format.c from->magic == XFS_DIR2_LEAFN_MAGIC); from 455 fs/xfs/libxfs/xfs_da_format.c to->hdr.info.forw = cpu_to_be32(from->forw); from 456 fs/xfs/libxfs/xfs_da_format.c to->hdr.info.back = cpu_to_be32(from->back); from 457 fs/xfs/libxfs/xfs_da_format.c to->hdr.info.magic = cpu_to_be16(from->magic); from 458 fs/xfs/libxfs/xfs_da_format.c to->hdr.count = cpu_to_be16(from->count); from 459 fs/xfs/libxfs/xfs_da_format.c to->hdr.stale = cpu_to_be16(from->stale); from 465 fs/xfs/libxfs/xfs_da_format.c struct xfs_dir2_leaf *from) from 467 fs/xfs/libxfs/xfs_da_format.c struct xfs_dir3_leaf_hdr *hdr3 = (struct xfs_dir3_leaf_hdr *)from; from 482 fs/xfs/libxfs/xfs_da_format.c struct xfs_dir3_icleaf_hdr *from) from 486 fs/xfs/libxfs/xfs_da_format.c ASSERT(from->magic == XFS_DIR3_LEAF1_MAGIC || from 487 fs/xfs/libxfs/xfs_da_format.c from->magic == XFS_DIR3_LEAFN_MAGIC); from 489 fs/xfs/libxfs/xfs_da_format.c hdr3->info.hdr.forw = cpu_to_be32(from->forw); from 490 fs/xfs/libxfs/xfs_da_format.c hdr3->info.hdr.back = cpu_to_be32(from->back); from 491 fs/xfs/libxfs/xfs_da_format.c hdr3->info.hdr.magic = cpu_to_be16(from->magic); from 492 fs/xfs/libxfs/xfs_da_format.c hdr3->count = cpu_to_be16(from->count); from 493 fs/xfs/libxfs/xfs_da_format.c hdr3->stale = cpu_to_be16(from->stale); from 515 fs/xfs/libxfs/xfs_da_format.c struct xfs_da_intnode *from) from 517 fs/xfs/libxfs/xfs_da_format.c ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); from 518 fs/xfs/libxfs/xfs_da_format.c to->forw = be32_to_cpu(from->hdr.info.forw); from 519 fs/xfs/libxfs/xfs_da_format.c to->back = be32_to_cpu(from->hdr.info.back); from 520 fs/xfs/libxfs/xfs_da_format.c to->magic = be16_to_cpu(from->hdr.info.magic); from 521 fs/xfs/libxfs/xfs_da_format.c to->count = be16_to_cpu(from->hdr.__count); from 522 fs/xfs/libxfs/xfs_da_format.c to->level = be16_to_cpu(from->hdr.__level); from 528 fs/xfs/libxfs/xfs_da_format.c struct xfs_da3_icnode_hdr *from) from 530 fs/xfs/libxfs/xfs_da_format.c ASSERT(from->magic == XFS_DA_NODE_MAGIC); from 531 fs/xfs/libxfs/xfs_da_format.c to->hdr.info.forw = cpu_to_be32(from->forw); from 532 fs/xfs/libxfs/xfs_da_format.c to->hdr.info.back = cpu_to_be32(from->back); from 533 fs/xfs/libxfs/xfs_da_format.c to->hdr.info.magic = cpu_to_be16(from->magic); from 534 
fs/xfs/libxfs/xfs_da_format.c to->hdr.__count = cpu_to_be16(from->count); from 535 fs/xfs/libxfs/xfs_da_format.c to->hdr.__level = cpu_to_be16(from->level); from 541 fs/xfs/libxfs/xfs_da_format.c struct xfs_da_intnode *from) from 543 fs/xfs/libxfs/xfs_da_format.c struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)from; from 545 fs/xfs/libxfs/xfs_da_format.c ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)); from 556 fs/xfs/libxfs/xfs_da_format.c struct xfs_da3_icnode_hdr *from) from 560 fs/xfs/libxfs/xfs_da_format.c ASSERT(from->magic == XFS_DA3_NODE_MAGIC); from 561 fs/xfs/libxfs/xfs_da_format.c hdr3->info.hdr.forw = cpu_to_be32(from->forw); from 562 fs/xfs/libxfs/xfs_da_format.c hdr3->info.hdr.back = cpu_to_be32(from->back); from 563 fs/xfs/libxfs/xfs_da_format.c hdr3->info.hdr.magic = cpu_to_be16(from->magic); from 564 fs/xfs/libxfs/xfs_da_format.c hdr3->__count = cpu_to_be16(from->count); from 565 fs/xfs/libxfs/xfs_da_format.c hdr3->__level = cpu_to_be16(from->level); from 639 fs/xfs/libxfs/xfs_da_format.c struct xfs_dir2_free *from) from 641 fs/xfs/libxfs/xfs_da_format.c to->magic = be32_to_cpu(from->hdr.magic); from 642 fs/xfs/libxfs/xfs_da_format.c to->firstdb = be32_to_cpu(from->hdr.firstdb); from 643 fs/xfs/libxfs/xfs_da_format.c to->nvalid = be32_to_cpu(from->hdr.nvalid); from 644 fs/xfs/libxfs/xfs_da_format.c to->nused = be32_to_cpu(from->hdr.nused); from 651 fs/xfs/libxfs/xfs_da_format.c struct xfs_dir3_icfree_hdr *from) from 653 fs/xfs/libxfs/xfs_da_format.c ASSERT(from->magic == XFS_DIR2_FREE_MAGIC); from 655 fs/xfs/libxfs/xfs_da_format.c to->hdr.magic = cpu_to_be32(from->magic); from 656 fs/xfs/libxfs/xfs_da_format.c to->hdr.firstdb = cpu_to_be32(from->firstdb); from 657 fs/xfs/libxfs/xfs_da_format.c to->hdr.nvalid = cpu_to_be32(from->nvalid); from 658 fs/xfs/libxfs/xfs_da_format.c to->hdr.nused = cpu_to_be32(from->nused); from 664 fs/xfs/libxfs/xfs_da_format.c struct xfs_dir2_free *from) from 666 fs/xfs/libxfs/xfs_da_format.c struct xfs_dir3_free_hdr *hdr3 = (struct xfs_dir3_free_hdr *)from; from 679 fs/xfs/libxfs/xfs_da_format.c struct xfs_dir3_icfree_hdr *from) from 683 fs/xfs/libxfs/xfs_da_format.c ASSERT(from->magic == XFS_DIR3_FREE_MAGIC); from 685 fs/xfs/libxfs/xfs_da_format.c hdr3->hdr.magic = cpu_to_be32(from->magic); from 686 fs/xfs/libxfs/xfs_da_format.c hdr3->firstdb = cpu_to_be32(from->firstdb); from 687 fs/xfs/libxfs/xfs_da_format.c hdr3->nvalid = cpu_to_be32(from->nvalid); from 688 fs/xfs/libxfs/xfs_da_format.c hdr3->nused = cpu_to_be32(from->nused); from 75 fs/xfs/libxfs/xfs_dir2.h struct xfs_dir3_icleaf_hdr *from); from 77 fs/xfs/libxfs/xfs_dir2.h struct xfs_dir2_leaf *from); from 84 fs/xfs/libxfs/xfs_dir2.h struct xfs_da3_icnode_hdr *from); from 86 fs/xfs/libxfs/xfs_dir2.h struct xfs_da_intnode *from); from 92 fs/xfs/libxfs/xfs_dir2.h struct xfs_dir3_icfree_hdr *from); from 94 fs/xfs/libxfs/xfs_dir2.h struct xfs_dir2_free *from); from 905 fs/xfs/libxfs/xfs_dir2_block.c int from; /* leaf from index */ from 1006 fs/xfs/libxfs/xfs_dir2_block.c for (from = to = 0; from < leafhdr.count; from++) { from 1007 fs/xfs/libxfs/xfs_dir2_block.c if (ents[from].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) from 1009 fs/xfs/libxfs/xfs_dir2_block.c lep[to++] = ents[from]; from 873 fs/xfs/libxfs/xfs_dir2_leaf.c int from; /* source leaf index */ from 888 fs/xfs/libxfs/xfs_dir2_leaf.c for (from = to = 0, loglow = -1; from < leafhdr->count; from++) { from 889 fs/xfs/libxfs/xfs_dir2_leaf.c if (ents[from].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) from 
894 fs/xfs/libxfs/xfs_dir2_leaf.c if (from > to) { from 897 fs/xfs/libxfs/xfs_dir2_leaf.c ents[to] = ents[from]; from 904 fs/xfs/libxfs/xfs_dir2_leaf.c ASSERT(leafhdr->stale == from - to); from 932 fs/xfs/libxfs/xfs_dir2_leaf.c int from; /* source copy index */ from 958 fs/xfs/libxfs/xfs_dir2_leaf.c for (from = to = 0; from < leafhdr->count; from++) { from 962 fs/xfs/libxfs/xfs_dir2_leaf.c if (index == from) from 964 fs/xfs/libxfs/xfs_dir2_leaf.c if (from != keepstale && from 965 fs/xfs/libxfs/xfs_dir2_leaf.c ents[from].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) { from 966 fs/xfs/libxfs/xfs_dir2_leaf.c if (from == to) from 973 fs/xfs/libxfs/xfs_dir2_leaf.c if (from == keepstale) from 978 fs/xfs/libxfs/xfs_dir2_leaf.c if (from > to) from 979 fs/xfs/libxfs/xfs_dir2_leaf.c ents[to] = ents[from]; from 982 fs/xfs/libxfs/xfs_dir2_leaf.c ASSERT(from > to); from 987 fs/xfs/libxfs/xfs_dir2_leaf.c if (index == from) from 993 fs/xfs/libxfs/xfs_dir2_leaf.c leafhdr->count -= from - to; from 343 fs/xfs/libxfs/xfs_dir2_node.c __be16 *from; /* pointer to freespace entry */ from 383 fs/xfs/libxfs/xfs_dir2_node.c from = xfs_dir2_leaf_bests_p(ltp); from 385 fs/xfs/libxfs/xfs_dir2_node.c for (i = n = 0; i < be32_to_cpu(ltp->bestcount); i++, from++, to++) { from 386 fs/xfs/libxfs/xfs_dir2_node.c if ((off = be16_to_cpu(*from)) != NULLDATAOFF) from 203 fs/xfs/libxfs/xfs_inode_buf.c struct xfs_dinode *from) from 213 fs/xfs/libxfs/xfs_inode_buf.c to->di_version = from->di_version; from 215 fs/xfs/libxfs/xfs_inode_buf.c set_nlink(inode, be16_to_cpu(from->di_onlink)); from 220 fs/xfs/libxfs/xfs_inode_buf.c set_nlink(inode, be32_to_cpu(from->di_nlink)); from 221 fs/xfs/libxfs/xfs_inode_buf.c to->di_projid_lo = be16_to_cpu(from->di_projid_lo); from 222 fs/xfs/libxfs/xfs_inode_buf.c to->di_projid_hi = be16_to_cpu(from->di_projid_hi); from 225 fs/xfs/libxfs/xfs_inode_buf.c to->di_format = from->di_format; from 226 fs/xfs/libxfs/xfs_inode_buf.c to->di_uid = be32_to_cpu(from->di_uid); from 227 fs/xfs/libxfs/xfs_inode_buf.c to->di_gid = be32_to_cpu(from->di_gid); from 228 fs/xfs/libxfs/xfs_inode_buf.c to->di_flushiter = be16_to_cpu(from->di_flushiter); from 236 fs/xfs/libxfs/xfs_inode_buf.c inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec); from 237 fs/xfs/libxfs/xfs_inode_buf.c inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec); from 238 fs/xfs/libxfs/xfs_inode_buf.c inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec); from 239 fs/xfs/libxfs/xfs_inode_buf.c inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec); from 240 fs/xfs/libxfs/xfs_inode_buf.c inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec); from 241 fs/xfs/libxfs/xfs_inode_buf.c inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec); from 242 fs/xfs/libxfs/xfs_inode_buf.c inode->i_generation = be32_to_cpu(from->di_gen); from 243 fs/xfs/libxfs/xfs_inode_buf.c inode->i_mode = be16_to_cpu(from->di_mode); from 245 fs/xfs/libxfs/xfs_inode_buf.c to->di_size = be64_to_cpu(from->di_size); from 246 fs/xfs/libxfs/xfs_inode_buf.c to->di_nblocks = be64_to_cpu(from->di_nblocks); from 247 fs/xfs/libxfs/xfs_inode_buf.c to->di_extsize = be32_to_cpu(from->di_extsize); from 248 fs/xfs/libxfs/xfs_inode_buf.c to->di_nextents = be32_to_cpu(from->di_nextents); from 249 fs/xfs/libxfs/xfs_inode_buf.c to->di_anextents = be16_to_cpu(from->di_anextents); from 250 fs/xfs/libxfs/xfs_inode_buf.c to->di_forkoff = from->di_forkoff; from 251 fs/xfs/libxfs/xfs_inode_buf.c to->di_aformat = from->di_aformat; from 252 
fs/xfs/libxfs/xfs_inode_buf.c to->di_dmevmask = be32_to_cpu(from->di_dmevmask); from 253 fs/xfs/libxfs/xfs_inode_buf.c to->di_dmstate = be16_to_cpu(from->di_dmstate); from 254 fs/xfs/libxfs/xfs_inode_buf.c to->di_flags = be16_to_cpu(from->di_flags); from 258 fs/xfs/libxfs/xfs_inode_buf.c be64_to_cpu(from->di_changecount)); from 259 fs/xfs/libxfs/xfs_inode_buf.c to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec); from 260 fs/xfs/libxfs/xfs_inode_buf.c to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec); from 261 fs/xfs/libxfs/xfs_inode_buf.c to->di_flags2 = be64_to_cpu(from->di_flags2); from 262 fs/xfs/libxfs/xfs_inode_buf.c to->di_cowextsize = be32_to_cpu(from->di_cowextsize); from 272 fs/xfs/libxfs/xfs_inode_buf.c struct xfs_icdinode *from = &ip->i_d; from 278 fs/xfs/libxfs/xfs_inode_buf.c to->di_version = from->di_version; from 279 fs/xfs/libxfs/xfs_inode_buf.c to->di_format = from->di_format; from 280 fs/xfs/libxfs/xfs_inode_buf.c to->di_uid = cpu_to_be32(from->di_uid); from 281 fs/xfs/libxfs/xfs_inode_buf.c to->di_gid = cpu_to_be32(from->di_gid); from 282 fs/xfs/libxfs/xfs_inode_buf.c to->di_projid_lo = cpu_to_be16(from->di_projid_lo); from 283 fs/xfs/libxfs/xfs_inode_buf.c to->di_projid_hi = cpu_to_be16(from->di_projid_hi); from 296 fs/xfs/libxfs/xfs_inode_buf.c to->di_size = cpu_to_be64(from->di_size); from 297 fs/xfs/libxfs/xfs_inode_buf.c to->di_nblocks = cpu_to_be64(from->di_nblocks); from 298 fs/xfs/libxfs/xfs_inode_buf.c to->di_extsize = cpu_to_be32(from->di_extsize); from 299 fs/xfs/libxfs/xfs_inode_buf.c to->di_nextents = cpu_to_be32(from->di_nextents); from 300 fs/xfs/libxfs/xfs_inode_buf.c to->di_anextents = cpu_to_be16(from->di_anextents); from 301 fs/xfs/libxfs/xfs_inode_buf.c to->di_forkoff = from->di_forkoff; from 302 fs/xfs/libxfs/xfs_inode_buf.c to->di_aformat = from->di_aformat; from 303 fs/xfs/libxfs/xfs_inode_buf.c to->di_dmevmask = cpu_to_be32(from->di_dmevmask); from 304 fs/xfs/libxfs/xfs_inode_buf.c to->di_dmstate = cpu_to_be16(from->di_dmstate); from 305 fs/xfs/libxfs/xfs_inode_buf.c to->di_flags = cpu_to_be16(from->di_flags); from 307 fs/xfs/libxfs/xfs_inode_buf.c if (from->di_version == 3) { from 309 fs/xfs/libxfs/xfs_inode_buf.c to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec); from 310 fs/xfs/libxfs/xfs_inode_buf.c to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec); from 311 fs/xfs/libxfs/xfs_inode_buf.c to->di_flags2 = cpu_to_be64(from->di_flags2); from 312 fs/xfs/libxfs/xfs_inode_buf.c to->di_cowextsize = cpu_to_be32(from->di_cowextsize); from 319 fs/xfs/libxfs/xfs_inode_buf.c to->di_flushiter = cpu_to_be16(from->di_flushiter); from 325 fs/xfs/libxfs/xfs_inode_buf.c struct xfs_log_dinode *from, from 328 fs/xfs/libxfs/xfs_inode_buf.c to->di_magic = cpu_to_be16(from->di_magic); from 329 fs/xfs/libxfs/xfs_inode_buf.c to->di_mode = cpu_to_be16(from->di_mode); from 330 fs/xfs/libxfs/xfs_inode_buf.c to->di_version = from->di_version; from 331 fs/xfs/libxfs/xfs_inode_buf.c to->di_format = from->di_format; from 333 fs/xfs/libxfs/xfs_inode_buf.c to->di_uid = cpu_to_be32(from->di_uid); from 334 fs/xfs/libxfs/xfs_inode_buf.c to->di_gid = cpu_to_be32(from->di_gid); from 335 fs/xfs/libxfs/xfs_inode_buf.c to->di_nlink = cpu_to_be32(from->di_nlink); from 336 fs/xfs/libxfs/xfs_inode_buf.c to->di_projid_lo = cpu_to_be16(from->di_projid_lo); from 337 fs/xfs/libxfs/xfs_inode_buf.c to->di_projid_hi = cpu_to_be16(from->di_projid_hi); from 338 fs/xfs/libxfs/xfs_inode_buf.c memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad)); from 340 
fs/xfs/libxfs/xfs_inode_buf.c to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec); from 341 fs/xfs/libxfs/xfs_inode_buf.c to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec); from 342 fs/xfs/libxfs/xfs_inode_buf.c to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec); from 343 fs/xfs/libxfs/xfs_inode_buf.c to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec); from 344 fs/xfs/libxfs/xfs_inode_buf.c to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec); from 345 fs/xfs/libxfs/xfs_inode_buf.c to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec); from 347 fs/xfs/libxfs/xfs_inode_buf.c to->di_size = cpu_to_be64(from->di_size); from 348 fs/xfs/libxfs/xfs_inode_buf.c to->di_nblocks = cpu_to_be64(from->di_nblocks); from 349 fs/xfs/libxfs/xfs_inode_buf.c to->di_extsize = cpu_to_be32(from->di_extsize); from 350 fs/xfs/libxfs/xfs_inode_buf.c to->di_nextents = cpu_to_be32(from->di_nextents); from 351 fs/xfs/libxfs/xfs_inode_buf.c to->di_anextents = cpu_to_be16(from->di_anextents); from 352 fs/xfs/libxfs/xfs_inode_buf.c to->di_forkoff = from->di_forkoff; from 353 fs/xfs/libxfs/xfs_inode_buf.c to->di_aformat = from->di_aformat; from 354 fs/xfs/libxfs/xfs_inode_buf.c to->di_dmevmask = cpu_to_be32(from->di_dmevmask); from 355 fs/xfs/libxfs/xfs_inode_buf.c to->di_dmstate = cpu_to_be16(from->di_dmstate); from 356 fs/xfs/libxfs/xfs_inode_buf.c to->di_flags = cpu_to_be16(from->di_flags); from 357 fs/xfs/libxfs/xfs_inode_buf.c to->di_gen = cpu_to_be32(from->di_gen); from 359 fs/xfs/libxfs/xfs_inode_buf.c if (from->di_version == 3) { from 360 fs/xfs/libxfs/xfs_inode_buf.c to->di_changecount = cpu_to_be64(from->di_changecount); from 361 fs/xfs/libxfs/xfs_inode_buf.c to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec); from 362 fs/xfs/libxfs/xfs_inode_buf.c to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec); from 363 fs/xfs/libxfs/xfs_inode_buf.c to->di_flags2 = cpu_to_be64(from->di_flags2); from 364 fs/xfs/libxfs/xfs_inode_buf.c to->di_cowextsize = cpu_to_be32(from->di_cowextsize); from 365 fs/xfs/libxfs/xfs_inode_buf.c to->di_ino = cpu_to_be64(from->di_ino); from 366 fs/xfs/libxfs/xfs_inode_buf.c to->di_lsn = cpu_to_be64(from->di_lsn); from 367 fs/xfs/libxfs/xfs_inode_buf.c memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2)); from 368 fs/xfs/libxfs/xfs_inode_buf.c uuid_copy(&to->di_uuid, &from->di_uuid); from 371 fs/xfs/libxfs/xfs_inode_buf.c to->di_flushiter = cpu_to_be16(from->di_flushiter); from 61 fs/xfs/libxfs/xfs_inode_buf.h void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from); from 62 fs/xfs/libxfs/xfs_inode_buf.h void xfs_log_dinode_to_disk(struct xfs_log_dinode *from, from 452 fs/xfs/libxfs/xfs_sb.c xfs_dsb_t *from, from 455 fs/xfs/libxfs/xfs_sb.c to->sb_magicnum = be32_to_cpu(from->sb_magicnum); from 456 fs/xfs/libxfs/xfs_sb.c to->sb_blocksize = be32_to_cpu(from->sb_blocksize); from 457 fs/xfs/libxfs/xfs_sb.c to->sb_dblocks = be64_to_cpu(from->sb_dblocks); from 458 fs/xfs/libxfs/xfs_sb.c to->sb_rblocks = be64_to_cpu(from->sb_rblocks); from 459 fs/xfs/libxfs/xfs_sb.c to->sb_rextents = be64_to_cpu(from->sb_rextents); from 460 fs/xfs/libxfs/xfs_sb.c memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid)); from 461 fs/xfs/libxfs/xfs_sb.c to->sb_logstart = be64_to_cpu(from->sb_logstart); from 462 fs/xfs/libxfs/xfs_sb.c to->sb_rootino = be64_to_cpu(from->sb_rootino); from 463 fs/xfs/libxfs/xfs_sb.c to->sb_rbmino = be64_to_cpu(from->sb_rbmino); from 464 fs/xfs/libxfs/xfs_sb.c to->sb_rsumino = be64_to_cpu(from->sb_rsumino); from 465 
fs/xfs/libxfs/xfs_sb.c to->sb_rextsize = be32_to_cpu(from->sb_rextsize); from 466 fs/xfs/libxfs/xfs_sb.c to->sb_agblocks = be32_to_cpu(from->sb_agblocks); from 467 fs/xfs/libxfs/xfs_sb.c to->sb_agcount = be32_to_cpu(from->sb_agcount); from 468 fs/xfs/libxfs/xfs_sb.c to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks); from 469 fs/xfs/libxfs/xfs_sb.c to->sb_logblocks = be32_to_cpu(from->sb_logblocks); from 470 fs/xfs/libxfs/xfs_sb.c to->sb_versionnum = be16_to_cpu(from->sb_versionnum); from 471 fs/xfs/libxfs/xfs_sb.c to->sb_sectsize = be16_to_cpu(from->sb_sectsize); from 472 fs/xfs/libxfs/xfs_sb.c to->sb_inodesize = be16_to_cpu(from->sb_inodesize); from 473 fs/xfs/libxfs/xfs_sb.c to->sb_inopblock = be16_to_cpu(from->sb_inopblock); from 474 fs/xfs/libxfs/xfs_sb.c memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname)); from 475 fs/xfs/libxfs/xfs_sb.c to->sb_blocklog = from->sb_blocklog; from 476 fs/xfs/libxfs/xfs_sb.c to->sb_sectlog = from->sb_sectlog; from 477 fs/xfs/libxfs/xfs_sb.c to->sb_inodelog = from->sb_inodelog; from 478 fs/xfs/libxfs/xfs_sb.c to->sb_inopblog = from->sb_inopblog; from 479 fs/xfs/libxfs/xfs_sb.c to->sb_agblklog = from->sb_agblklog; from 480 fs/xfs/libxfs/xfs_sb.c to->sb_rextslog = from->sb_rextslog; from 481 fs/xfs/libxfs/xfs_sb.c to->sb_inprogress = from->sb_inprogress; from 482 fs/xfs/libxfs/xfs_sb.c to->sb_imax_pct = from->sb_imax_pct; from 483 fs/xfs/libxfs/xfs_sb.c to->sb_icount = be64_to_cpu(from->sb_icount); from 484 fs/xfs/libxfs/xfs_sb.c to->sb_ifree = be64_to_cpu(from->sb_ifree); from 485 fs/xfs/libxfs/xfs_sb.c to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks); from 486 fs/xfs/libxfs/xfs_sb.c to->sb_frextents = be64_to_cpu(from->sb_frextents); from 487 fs/xfs/libxfs/xfs_sb.c to->sb_uquotino = be64_to_cpu(from->sb_uquotino); from 488 fs/xfs/libxfs/xfs_sb.c to->sb_gquotino = be64_to_cpu(from->sb_gquotino); from 489 fs/xfs/libxfs/xfs_sb.c to->sb_qflags = be16_to_cpu(from->sb_qflags); from 490 fs/xfs/libxfs/xfs_sb.c to->sb_flags = from->sb_flags; from 491 fs/xfs/libxfs/xfs_sb.c to->sb_shared_vn = from->sb_shared_vn; from 492 fs/xfs/libxfs/xfs_sb.c to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt); from 493 fs/xfs/libxfs/xfs_sb.c to->sb_unit = be32_to_cpu(from->sb_unit); from 494 fs/xfs/libxfs/xfs_sb.c to->sb_width = be32_to_cpu(from->sb_width); from 495 fs/xfs/libxfs/xfs_sb.c to->sb_dirblklog = from->sb_dirblklog; from 496 fs/xfs/libxfs/xfs_sb.c to->sb_logsectlog = from->sb_logsectlog; from 497 fs/xfs/libxfs/xfs_sb.c to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize); from 498 fs/xfs/libxfs/xfs_sb.c to->sb_logsunit = be32_to_cpu(from->sb_logsunit); from 499 fs/xfs/libxfs/xfs_sb.c to->sb_features2 = be32_to_cpu(from->sb_features2); from 500 fs/xfs/libxfs/xfs_sb.c to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2); from 501 fs/xfs/libxfs/xfs_sb.c to->sb_features_compat = be32_to_cpu(from->sb_features_compat); from 502 fs/xfs/libxfs/xfs_sb.c to->sb_features_ro_compat = be32_to_cpu(from->sb_features_ro_compat); from 503 fs/xfs/libxfs/xfs_sb.c to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat); from 505 fs/xfs/libxfs/xfs_sb.c be32_to_cpu(from->sb_features_log_incompat); from 508 fs/xfs/libxfs/xfs_sb.c to->sb_spino_align = be32_to_cpu(from->sb_spino_align); from 509 fs/xfs/libxfs/xfs_sb.c to->sb_pquotino = be64_to_cpu(from->sb_pquotino); from 510 fs/xfs/libxfs/xfs_sb.c to->sb_lsn = be64_to_cpu(from->sb_lsn); from 516 fs/xfs/libxfs/xfs_sb.c uuid_copy(&to->sb_meta_uuid, &from->sb_meta_uuid); from 518 fs/xfs/libxfs/xfs_sb.c 
uuid_copy(&to->sb_meta_uuid, &from->sb_uuid); from 527 fs/xfs/libxfs/xfs_sb.c xfs_dsb_t *from) from 529 fs/xfs/libxfs/xfs_sb.c __xfs_sb_from_disk(to, from, true); from 535 fs/xfs/libxfs/xfs_sb.c struct xfs_sb *from) from 537 fs/xfs/libxfs/xfs_sb.c uint16_t qflags = from->sb_qflags; from 539 fs/xfs/libxfs/xfs_sb.c to->sb_uquotino = cpu_to_be64(from->sb_uquotino); from 540 fs/xfs/libxfs/xfs_sb.c if (xfs_sb_version_has_pquotino(from)) { from 541 fs/xfs/libxfs/xfs_sb.c to->sb_qflags = cpu_to_be16(from->sb_qflags); from 542 fs/xfs/libxfs/xfs_sb.c to->sb_gquotino = cpu_to_be64(from->sb_gquotino); from 543 fs/xfs/libxfs/xfs_sb.c to->sb_pquotino = cpu_to_be64(from->sb_pquotino); from 555 fs/xfs/libxfs/xfs_sb.c if (from->sb_qflags & from 558 fs/xfs/libxfs/xfs_sb.c if (from->sb_qflags & from 572 fs/xfs/libxfs/xfs_sb.c if (from->sb_qflags & XFS_GQUOTA_ACCT) from 573 fs/xfs/libxfs/xfs_sb.c to->sb_gquotino = cpu_to_be64(from->sb_gquotino); from 574 fs/xfs/libxfs/xfs_sb.c else if (from->sb_qflags & XFS_PQUOTA_ACCT) from 575 fs/xfs/libxfs/xfs_sb.c to->sb_gquotino = cpu_to_be64(from->sb_pquotino); from 583 fs/xfs/libxfs/xfs_sb.c if (from->sb_gquotino == NULLFSINO && from 584 fs/xfs/libxfs/xfs_sb.c from->sb_pquotino == NULLFSINO) from 594 fs/xfs/libxfs/xfs_sb.c struct xfs_sb *from) from 596 fs/xfs/libxfs/xfs_sb.c xfs_sb_quota_to_disk(to, from); from 598 fs/xfs/libxfs/xfs_sb.c to->sb_magicnum = cpu_to_be32(from->sb_magicnum); from 599 fs/xfs/libxfs/xfs_sb.c to->sb_blocksize = cpu_to_be32(from->sb_blocksize); from 600 fs/xfs/libxfs/xfs_sb.c to->sb_dblocks = cpu_to_be64(from->sb_dblocks); from 601 fs/xfs/libxfs/xfs_sb.c to->sb_rblocks = cpu_to_be64(from->sb_rblocks); from 602 fs/xfs/libxfs/xfs_sb.c to->sb_rextents = cpu_to_be64(from->sb_rextents); from 603 fs/xfs/libxfs/xfs_sb.c memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid)); from 604 fs/xfs/libxfs/xfs_sb.c to->sb_logstart = cpu_to_be64(from->sb_logstart); from 605 fs/xfs/libxfs/xfs_sb.c to->sb_rootino = cpu_to_be64(from->sb_rootino); from 606 fs/xfs/libxfs/xfs_sb.c to->sb_rbmino = cpu_to_be64(from->sb_rbmino); from 607 fs/xfs/libxfs/xfs_sb.c to->sb_rsumino = cpu_to_be64(from->sb_rsumino); from 608 fs/xfs/libxfs/xfs_sb.c to->sb_rextsize = cpu_to_be32(from->sb_rextsize); from 609 fs/xfs/libxfs/xfs_sb.c to->sb_agblocks = cpu_to_be32(from->sb_agblocks); from 610 fs/xfs/libxfs/xfs_sb.c to->sb_agcount = cpu_to_be32(from->sb_agcount); from 611 fs/xfs/libxfs/xfs_sb.c to->sb_rbmblocks = cpu_to_be32(from->sb_rbmblocks); from 612 fs/xfs/libxfs/xfs_sb.c to->sb_logblocks = cpu_to_be32(from->sb_logblocks); from 613 fs/xfs/libxfs/xfs_sb.c to->sb_versionnum = cpu_to_be16(from->sb_versionnum); from 614 fs/xfs/libxfs/xfs_sb.c to->sb_sectsize = cpu_to_be16(from->sb_sectsize); from 615 fs/xfs/libxfs/xfs_sb.c to->sb_inodesize = cpu_to_be16(from->sb_inodesize); from 616 fs/xfs/libxfs/xfs_sb.c to->sb_inopblock = cpu_to_be16(from->sb_inopblock); from 617 fs/xfs/libxfs/xfs_sb.c memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname)); from 618 fs/xfs/libxfs/xfs_sb.c to->sb_blocklog = from->sb_blocklog; from 619 fs/xfs/libxfs/xfs_sb.c to->sb_sectlog = from->sb_sectlog; from 620 fs/xfs/libxfs/xfs_sb.c to->sb_inodelog = from->sb_inodelog; from 621 fs/xfs/libxfs/xfs_sb.c to->sb_inopblog = from->sb_inopblog; from 622 fs/xfs/libxfs/xfs_sb.c to->sb_agblklog = from->sb_agblklog; from 623 fs/xfs/libxfs/xfs_sb.c to->sb_rextslog = from->sb_rextslog; from 624 fs/xfs/libxfs/xfs_sb.c to->sb_inprogress = from->sb_inprogress; from 625 fs/xfs/libxfs/xfs_sb.c to->sb_imax_pct = 
from->sb_imax_pct; from 626 fs/xfs/libxfs/xfs_sb.c to->sb_icount = cpu_to_be64(from->sb_icount); from 627 fs/xfs/libxfs/xfs_sb.c to->sb_ifree = cpu_to_be64(from->sb_ifree); from 628 fs/xfs/libxfs/xfs_sb.c to->sb_fdblocks = cpu_to_be64(from->sb_fdblocks); from 629 fs/xfs/libxfs/xfs_sb.c to->sb_frextents = cpu_to_be64(from->sb_frextents); from 631 fs/xfs/libxfs/xfs_sb.c to->sb_flags = from->sb_flags; from 632 fs/xfs/libxfs/xfs_sb.c to->sb_shared_vn = from->sb_shared_vn; from 633 fs/xfs/libxfs/xfs_sb.c to->sb_inoalignmt = cpu_to_be32(from->sb_inoalignmt); from 634 fs/xfs/libxfs/xfs_sb.c to->sb_unit = cpu_to_be32(from->sb_unit); from 635 fs/xfs/libxfs/xfs_sb.c to->sb_width = cpu_to_be32(from->sb_width); from 636 fs/xfs/libxfs/xfs_sb.c to->sb_dirblklog = from->sb_dirblklog; from 637 fs/xfs/libxfs/xfs_sb.c to->sb_logsectlog = from->sb_logsectlog; from 638 fs/xfs/libxfs/xfs_sb.c to->sb_logsectsize = cpu_to_be16(from->sb_logsectsize); from 639 fs/xfs/libxfs/xfs_sb.c to->sb_logsunit = cpu_to_be32(from->sb_logsunit); from 646 fs/xfs/libxfs/xfs_sb.c from->sb_bad_features2 = from->sb_features2; from 647 fs/xfs/libxfs/xfs_sb.c to->sb_features2 = cpu_to_be32(from->sb_features2); from 648 fs/xfs/libxfs/xfs_sb.c to->sb_bad_features2 = cpu_to_be32(from->sb_bad_features2); from 650 fs/xfs/libxfs/xfs_sb.c if (xfs_sb_version_hascrc(from)) { from 651 fs/xfs/libxfs/xfs_sb.c to->sb_features_compat = cpu_to_be32(from->sb_features_compat); from 653 fs/xfs/libxfs/xfs_sb.c cpu_to_be32(from->sb_features_ro_compat); from 655 fs/xfs/libxfs/xfs_sb.c cpu_to_be32(from->sb_features_incompat); from 657 fs/xfs/libxfs/xfs_sb.c cpu_to_be32(from->sb_features_log_incompat); from 658 fs/xfs/libxfs/xfs_sb.c to->sb_spino_align = cpu_to_be32(from->sb_spino_align); from 659 fs/xfs/libxfs/xfs_sb.c to->sb_lsn = cpu_to_be64(from->sb_lsn); from 660 fs/xfs/libxfs/xfs_sb.c if (xfs_sb_version_hasmetauuid(from)) from 661 fs/xfs/libxfs/xfs_sb.c uuid_copy(&to->sb_meta_uuid, &from->sb_meta_uuid); from 29 fs/xfs/libxfs/xfs_sb.h extern void xfs_sb_from_disk(struct xfs_sb *to, struct xfs_dsb *from); from 30 fs/xfs/libxfs/xfs_sb.h extern void xfs_sb_to_disk(struct xfs_dsb *to, struct xfs_sb *from); from 283 fs/xfs/xfs_file.c struct iov_iter *from, from 290 fs/xfs/xfs_file.c size_t count = iov_iter_count(from); from 295 fs/xfs/xfs_file.c error = generic_write_checks(iocb, from); from 337 fs/xfs/xfs_file.c iov_iter_reexpand(from, count); from 478 fs/xfs/xfs_file.c struct iov_iter *from) from 488 fs/xfs/xfs_file.c size_t count = iov_iter_count(from); from 530 fs/xfs/xfs_file.c ret = xfs_file_aio_write_checks(iocb, from, &iolock); from 533 fs/xfs/xfs_file.c count = iov_iter_count(from); from 550 fs/xfs/xfs_file.c ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, &xfs_dio_write_ops); from 573 fs/xfs/xfs_file.c struct iov_iter *from) from 589 fs/xfs/xfs_file.c ret = xfs_file_aio_write_checks(iocb, from, &iolock); from 594 fs/xfs/xfs_file.c count = iov_iter_count(from); from 597 fs/xfs/xfs_file.c ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops); from 619 fs/xfs/xfs_file.c struct iov_iter *from) from 636 fs/xfs/xfs_file.c ret = xfs_file_aio_write_checks(iocb, from, &iolock); from 643 fs/xfs/xfs_file.c trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos); from 644 fs/xfs/xfs_file.c ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops); from 695 fs/xfs/xfs_file.c struct iov_iter *from) from 702 fs/xfs/xfs_file.c size_t ocount = iov_iter_count(from); from 713 fs/xfs/xfs_file.c return xfs_file_dax_write(iocb, from); from 722 fs/xfs/xfs_file.c 
ret = xfs_file_dio_aio_write(iocb, from); from 727 fs/xfs/xfs_file.c return xfs_file_buffered_aio_write(iocb, from); from 303 fs/xfs/xfs_inode_item.c struct xfs_icdinode *from = &ip->i_d; from 308 fs/xfs/xfs_inode_item.c to->di_version = from->di_version; from 309 fs/xfs/xfs_inode_item.c to->di_format = from->di_format; from 310 fs/xfs/xfs_inode_item.c to->di_uid = from->di_uid; from 311 fs/xfs/xfs_inode_item.c to->di_gid = from->di_gid; from 312 fs/xfs/xfs_inode_item.c to->di_projid_lo = from->di_projid_lo; from 313 fs/xfs/xfs_inode_item.c to->di_projid_hi = from->di_projid_hi; from 327 fs/xfs/xfs_inode_item.c to->di_size = from->di_size; from 328 fs/xfs/xfs_inode_item.c to->di_nblocks = from->di_nblocks; from 329 fs/xfs/xfs_inode_item.c to->di_extsize = from->di_extsize; from 330 fs/xfs/xfs_inode_item.c to->di_nextents = from->di_nextents; from 331 fs/xfs/xfs_inode_item.c to->di_anextents = from->di_anextents; from 332 fs/xfs/xfs_inode_item.c to->di_forkoff = from->di_forkoff; from 333 fs/xfs/xfs_inode_item.c to->di_aformat = from->di_aformat; from 334 fs/xfs/xfs_inode_item.c to->di_dmevmask = from->di_dmevmask; from 335 fs/xfs/xfs_inode_item.c to->di_dmstate = from->di_dmstate; from 336 fs/xfs/xfs_inode_item.c to->di_flags = from->di_flags; from 341 fs/xfs/xfs_inode_item.c if (from->di_version == 3) { from 343 fs/xfs/xfs_inode_item.c to->di_crtime.t_sec = from->di_crtime.t_sec; from 344 fs/xfs/xfs_inode_item.c to->di_crtime.t_nsec = from->di_crtime.t_nsec; from 345 fs/xfs/xfs_inode_item.c to->di_flags2 = from->di_flags2; from 346 fs/xfs/xfs_inode_item.c to->di_cowextsize = from->di_cowextsize; from 353 fs/xfs/xfs_inode_item.c to->di_flushiter = from->di_flushiter; from 29 include/asm-generic/page.h #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) from 32 include/asm-generic/page.h #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) from 14 include/asm-generic/uaccess.h raw_copy_from_user(void *to, const void __user * from, unsigned long n) from 19 include/asm-generic/uaccess.h *(u8 *)to = *(u8 __force *)from; from 22 include/asm-generic/uaccess.h *(u16 *)to = *(u16 __force *)from; from 25 include/asm-generic/uaccess.h *(u32 *)to = *(u32 __force *)from; from 29 include/asm-generic/uaccess.h *(u64 *)to = *(u64 __force *)from; from 35 include/asm-generic/uaccess.h memcpy(to, (const void __force *)from, n); from 40 include/asm-generic/uaccess.h raw_copy_to_user(void __user *to, const void *from, unsigned long n) from 45 include/asm-generic/uaccess.h *(u8 __force *)to = *(u8 *)from; from 48 include/asm-generic/uaccess.h *(u16 __force *)to = *(u16 *)from; from 51 include/asm-generic/uaccess.h *(u32 __force *)to = *(u32 *)from; from 55 include/asm-generic/uaccess.h *(u64 __force *)to = *(u64 *)from; from 63 include/asm-generic/uaccess.h memcpy((void __force *)to, from, n); from 113 include/linux/bitfield.h #define ____MAKE_OP(type,base,to,from) \ from 132 include/linux/bitfield.h return (from(v) & field)/field_multiplier(field); \ from 690 include/linux/blk-cgroup.h struct blkg_rwstat *from) from 696 include/linux/blk-cgroup.h sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]); from 699 include/linux/blk-cgroup.h atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]), from 225 include/linux/buffer_head.h int block_is_partially_uptodate(struct page *page, unsigned long from, from 237 include/linux/buffer_head.h void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); from 243 include/linux/buffer_head.h int block_commit_write(struct page *page, 
unsigned from, unsigned to); from 111 include/linux/cgroup.h int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); from 112 include/linux/cgroup.h int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); from 706 include/linux/cgroup.h static inline int cgroup_attach_task_all(struct task_struct *from, from 127 include/linux/clocksource.h static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) from 136 include/linux/clocksource.h u64 tmp = ((u64)from) << shift_constant; from 204 include/linux/clocksource.h clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec); from 21 include/linux/cmdline-parser.h sector_t from; from 421 include/linux/compat.h int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from); from 422 include/linux/compat.h int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from); from 24 include/linux/crash_dump.h unsigned long from, unsigned long pfn, from 54 include/linux/debug_locks.h extern void debug_check_no_locks_freed(const void *from, unsigned long len); from 66 include/linux/debug_locks.h debug_check_no_locks_freed(const void *from, unsigned long len) from 65 include/linux/dm-kcopyd.h void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, from 104 include/linux/dmi.h const struct dmi_device *from); from 122 include/linux/dmi.h const struct dmi_device *from) { return NULL; } from 760 include/linux/fb.h extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to); from 761 include/linux/fb.h extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to); from 82 include/linux/file.h extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); from 3119 include/linux/fs.h extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); from 3339 include/linux/fs.h loff_t *ppos, const void *from, size_t available); from 3341 include/linux/fs.h const void __user *from, size_t count); from 250 include/linux/highmem.h static inline void copy_user_highpage(struct page *to, struct page *from, from 255 include/linux/highmem.h vfrom = kmap_atomic(from); from 266 include/linux/highmem.h static inline void copy_highpage(struct page *to, struct page *from) from 270 include/linux/highmem.h vfrom = kmap_atomic(from); from 97 include/linux/hugetlb.h int hugetlb_reserve_pages(struct inode *inode, long from, long to, from 19 include/linux/io.h __visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); from 20 include/linux/io.h void __ioread32_copy(void *to, const void __iomem *from, size_t count); from 21 include/linux/io.h void __iowrite64_copy(void __iomem *to, const void *from, size_t count); from 152 include/linux/iomap.h ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, from 158 include/linux/iomap.h int iomap_is_partially_uptodate(struct page *page, unsigned long from, from 155 include/linux/iova.h void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); from 228 include/linux/iova.h static inline void copy_reserved_iova(struct iova_domain *from, from 866 include/linux/irq.h unsigned int arch_dynirq_lower_bound(unsigned int from); from 868 include/linux/irq.h int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, from 872 include/linux/irq.h int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, from 877 include/linux/irq.h #define irq_alloc_descs(irq, 
from, cnt, node) \ from 878 include/linux/irq.h __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) from 886 include/linux/irq.h #define irq_alloc_desc_from(from, node) \ from 887 include/linux/irq.h irq_alloc_descs(-1, from, 1, node) from 889 include/linux/irq.h #define irq_alloc_descs_from(from, cnt, node) \ from 890 include/linux/irq.h irq_alloc_descs(-1, from, cnt, node) from 892 include/linux/irq.h #define devm_irq_alloc_descs(dev, irq, from, cnt, node) \ from 893 include/linux/irq.h __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL) from 901 include/linux/irq.h #define devm_irq_alloc_desc_from(dev, from, node) \ from 902 include/linux/irq.h devm_irq_alloc_descs(dev, -1, from, 1, node) from 904 include/linux/irq.h #define devm_irq_alloc_descs_from(dev, from, cnt, node) \ from 905 include/linux/irq.h devm_irq_alloc_descs(dev, -1, from, cnt, node) from 919 include/linux/irq.h void irq_free_hwirqs(unsigned int from, int cnt); from 80 include/linux/isapnp.h struct pnp_card *from); from 84 include/linux/isapnp.h struct pnp_dev *from); from 97 include/linux/isapnp.h struct pnp_card *from) { return NULL; } from 101 include/linux/isapnp.h struct pnp_dev *from) { return NULL; } from 589 include/linux/kfifo.h #define kfifo_from_user(fifo, from, len, copied) \ from 593 include/linux/kfifo.h const void __user *__from = (from); \ from 773 include/linux/kfifo.h const void __user *from, unsigned long len, unsigned int *copied); from 794 include/linux/kfifo.h const void __user *from, unsigned long len, unsigned int *copied, from 168 include/linux/lockdep.h struct lockdep_map *from) from 172 include/linux/lockdep.h *to = *from; from 1460 include/linux/lsm_hooks.h int (*binder_transaction)(struct task_struct *from, from 1462 include/linux/lsm_hooks.h int (*binder_transfer_binder)(struct task_struct *from, from 1464 include/linux/lsm_hooks.h int (*binder_transfer_file)(struct task_struct *from, from 165 include/linux/mempolicy.h int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, from 285 include/linux/mempolicy.h static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, from 1481 include/linux/mm.h void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); from 49 include/linux/mtd/hyperbus.h unsigned long from, ssize_t len); from 51 include/linux/mtd/hyperbus.h const void *from, ssize_t len); from 256 include/linux/mtd/map.h #define INVALIDATE_CACHED_RANGE(map, from, size) \ from 257 include/linux/mtd/map.h do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) from 430 include/linux/mtd/map.h static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) from 433 include/linux/mtd/map.h memcpy(to, (char *)map->cached + from, len); from 435 include/linux/mtd/map.h memcpy_fromio(to, map->virt + from, len); from 438 include/linux/mtd/map.h static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) from 440 include/linux/mtd/map.h memcpy_toio(map->virt + to, from, len); from 445 include/linux/mtd/map.h #define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len) from 447 include/linux/mtd/map.h #define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len) from 454 include/linux/mtd/map.h #define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len) from 456 include/linux/mtd/map.h #define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len) from 
277 include/linux/mtd/mtd.h int (*_point) (struct mtd_info *mtd, loff_t from, size_t len, from 279 include/linux/mtd/mtd.h int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len); from 280 include/linux/mtd/mtd.h int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, from 286 include/linux/mtd/mtd.h int (*_read_oob) (struct mtd_info *mtd, loff_t from, from 292 include/linux/mtd/mtd.h int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, from 296 include/linux/mtd/mtd.h int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from, from 300 include/linux/mtd/mtd.h int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, from 410 include/linux/mtd/mtd.h int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, from 412 include/linux/mtd/mtd.h int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len); from 415 include/linux/mtd/mtd.h int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, from 422 include/linux/mtd/mtd.h int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops); from 427 include/linux/mtd/mtd.h int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, from 431 include/linux/mtd/mtd.h int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, from 435 include/linux/mtd/mtd.h int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len); from 224 include/linux/mtd/onenand.h int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from, from 596 include/linux/mtd/spi-nor.h ssize_t (*read)(struct spi_nor *nor, loff_t from, from 4144 include/linux/netdevice.h int dev_uc_sync(struct net_device *to, struct net_device *from); from 4145 include/linux/netdevice.h int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); from 4146 include/linux/netdevice.h void dev_uc_unsync(struct net_device *to, struct net_device *from); from 4188 include/linux/netdevice.h int dev_mc_sync(struct net_device *to, struct net_device *from); from 4189 include/linux/netdevice.h int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); from 4190 include/linux/netdevice.h void dev_mc_unsync(struct net_device *to, struct net_device *from); from 18 include/linux/netfilter/ipset/ip_set_bitmap.h range_to_mask(u32 from, u32 to, u8 *bits) from 23 include/linux/netfilter/ipset/ip_set_bitmap.h while (--(*bits) > 0 && mask && (to & mask) != from) from 37 include/linux/netfilter/ipset/pfxlen.h extern u32 ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr); from 39 include/linux/netfilter/ipset/pfxlen.h #define ip_set_mask_from_to(from, to, cidr) \ from 41 include/linux/netfilter/ipset/pfxlen.h from &= ip_set_hostmask(cidr); \ from 42 include/linux/netfilter/ipset/pfxlen.h to = from | ~ip_set_hostmask(cidr); \ from 124 include/linux/nubus.h struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from); from 264 include/linux/of.h #define for_each_of_allnodes_from(from, dn) \ from 265 include/linux/of.h for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) from 267 include/linux/of.h extern struct device_node *of_find_node_by_name(struct device_node *from, from 269 include/linux/of.h extern struct device_node *of_find_node_by_type(struct device_node *from, from 271 include/linux/of.h extern struct device_node *of_find_compatible_node(struct device_node *from, from 274 include/linux/of.h struct device_node *from, from 302 include/linux/of.h struct device_node *from, const char *prop_name); from 590 include/linux/of.h static inline struct 
device_node *of_find_node_by_name(struct device_node *from, from 596 include/linux/of.h static inline struct device_node *of_find_node_by_type(struct device_node *from, from 603 include/linux/of.h struct device_node *from, from 644 include/linux/of.h struct device_node *from, const char *prop_name) from 699 include/linux/of.h struct device_node *from, from 1016 include/linux/of.h struct device_node *from, from 1019 include/linux/of.h return of_find_matching_node_and_match(from, matches, NULL); from 37 include/linux/of_address.h struct device_node *from, from 75 include/linux/of_address.h struct device_node *from, from 25 include/linux/parser.h char *from; from 1049 include/linux/pci.h struct pci_bus *pci_find_next_bus(const struct pci_bus *from); from 1052 include/linux/pci.h struct pci_dev *from); from 1055 include/linux/pci.h struct pci_dev *from); from 1059 include/linux/pci.h struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); from 1668 include/linux/pci.h struct pci_dev *from) from 1675 include/linux/pci.h struct pci_dev *from) from 1679 include/linux/pci.h struct pci_dev *from) from 1728 include/linux/pci.h static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) from 102 include/linux/percpu.h typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to); from 450 include/linux/pnp.h const char *id, struct pnp_dev *from); from 481 include/linux/pnp.h static inline struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink, const char *id, struct pnp_dev *from) { return NULL; } from 125 include/linux/poll.h #define __MAP(v, from, to) \ from 126 include/linux/poll.h (from < to ? (v & from) * (to/from) : (v & from) / (from/to)) from 35 include/linux/prime_numbers.h #define for_each_prime_number_from(prime, from, max) \ from 36 include/linux/prime_numbers.h for (prime = (from); prime <= (max); prime = next_prime_number(prime)) from 53 include/linux/projid.h extern kprojid_t make_kprojid(struct user_namespace *from, projid_t projid); from 65 include/linux/projid.h static inline kprojid_t make_kprojid(struct user_namespace *from, projid_t projid) from 97 include/linux/quota.h static inline struct kqid make_kqid(struct user_namespace *from, from 105 include/linux/quota.h kqid.uid = make_kuid(from, qid); from 108 include/linux/quota.h kqid.gid = make_kgid(from, qid); from 111 include/linux/quota.h kqid.projid = make_kprojid(from, qid); from 447 include/linux/rio_drv.h extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from); from 449 include/linux/rio_drv.h struct rio_dev *from); from 49 include/linux/rtmutex.h extern int rt_mutex_debug_check_no_locks_freed(const void *from, from 53 include/linux/rtmutex.h static inline int rt_mutex_debug_check_no_locks_freed(const void *from, from 1683 include/linux/sched.h extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); from 1685 include/linux/sched.h static inline void set_task_comm(struct task_struct *tsk, const char *from) from 1687 include/linux/sched.h __set_task_comm(tsk, from, false); from 253 include/linux/security.h int security_binder_transaction(struct task_struct *from, from 255 include/linux/security.h int security_binder_transfer_binder(struct task_struct *from, from 257 include/linux/security.h int security_binder_transfer_file(struct task_struct *from, from 489 include/linux/security.h static inline int security_binder_transaction(struct task_struct *from, from 495 include/linux/security.h static inline int 
security_binder_transfer_binder(struct task_struct *from, from 501 include/linux/security.h static inline int security_binder_transfer_file(struct task_struct *from, from 19 include/linux/seq_file.h size_t from; from 15 include/linux/signal.h const kernel_siginfo_t *from) from 17 include/linux/signal.h memcpy(to, from, sizeof(*to)); from 27 include/linux/signal.h int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from); from 28 include/linux/signal.h int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from); from 1031 include/linux/skbuff.h bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, from 1160 include/linux/skbuff.h void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, from 1166 include/linux/skbuff.h unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, from 1367 include/linux/skbuff.h static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) from 1369 include/linux/skbuff.h to->hash = from->hash; from 1370 include/linux/skbuff.h to->sw_hash = from->sw_hash; from 1371 include/linux/skbuff.h to->l4_hash = from->l4_hash; from 1375 include/linux/skbuff.h const struct sk_buff *from) from 1378 include/linux/skbuff.h to->decrypted = from->decrypted; from 3218 include/linux/skbuff.h struct iov_iter *from, int copy) from 3225 include/linux/skbuff.h &csum, from)) { from 3229 include/linux/skbuff.h } else if (copy_from_iter_full(skb_put(skb, copy), copy, from)) from 3486 include/linux/skbuff.h int skb_copy_datagram_iter(const struct sk_buff *from, int offset, from 3488 include/linux/skbuff.h static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset, from 3491 include/linux/skbuff.h return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size); from 3499 include/linux/skbuff.h struct iov_iter *from, int len); from 3510 include/linux/skbuff.h int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); from 3519 include/linux/skbuff.h unsigned int skb_zerocopy_headlen(const struct sk_buff *from); from 3520 include/linux/skbuff.h int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, from 3618 include/linux/skbuff.h const void *from, from 3621 include/linux/skbuff.h memcpy(skb->data, from, len); from 3626 include/linux/skbuff.h const void *from, from 3629 include/linux/skbuff.h memcpy(skb->data + offset, from, len); from 4236 include/linux/skbuff.h static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) from 4238 include/linux/skbuff.h to->secmark = from->secmark; from 4246 include/linux/skbuff.h static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from) from 4281 include/linux/skbuff.h static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from) from 4283 include/linux/skbuff.h to->queue_mapping = from->queue_mapping; from 125 include/linux/skmsg.h int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, from 127 include/linux/skmsg.h int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, from 16 include/linux/sonet.h extern void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to); from 17 include/linux/sonet.h extern void sonet_subtract_stats(struct k_sonet_stats *from, from 217 include/linux/string.h const void *from, size_t available); from 892 include/linux/syscalls.h const unsigned long __user *from, from 1009 include/linux/syscalls.h asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on); from 487 
include/linux/tcp.h int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount, from 52 include/linux/topology.h #define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE) from 59 include/linux/uaccess.h __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) from 63 include/linux/uaccess.h return raw_copy_from_user(to, from, n); from 67 include/linux/uaccess.h __copy_from_user(void *to, const void __user *from, unsigned long n) from 72 include/linux/uaccess.h return raw_copy_from_user(to, from, n); from 89 include/linux/uaccess.h __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) from 91 include/linux/uaccess.h kasan_check_read(from, n); from 92 include/linux/uaccess.h check_object_size(from, n, true); from 93 include/linux/uaccess.h return raw_copy_to_user(to, from, n); from 97 include/linux/uaccess.h __copy_to_user(void __user *to, const void *from, unsigned long n) from 100 include/linux/uaccess.h kasan_check_read(from, n); from 101 include/linux/uaccess.h check_object_size(from, n, true); from 102 include/linux/uaccess.h return raw_copy_to_user(to, from, n); from 107 include/linux/uaccess.h _copy_from_user(void *to, const void __user *from, unsigned long n) from 111 include/linux/uaccess.h if (likely(access_ok(from, n))) { from 113 include/linux/uaccess.h res = raw_copy_from_user(to, from, n); from 126 include/linux/uaccess.h _copy_to_user(void __user *to, const void *from, unsigned long n) from 130 include/linux/uaccess.h kasan_check_read(from, n); from 131 include/linux/uaccess.h n = raw_copy_to_user(to, from, n); from 141 include/linux/uaccess.h copy_from_user(void *to, const void __user *from, unsigned long n) from 144 include/linux/uaccess.h n = _copy_from_user(to, from, n); from 149 include/linux/uaccess.h copy_to_user(void __user *to, const void *from, unsigned long n) from 151 include/linux/uaccess.h if (likely(check_copy_size(from, n, true))) from 152 include/linux/uaccess.h n = _copy_to_user(to, from, n); from 157 include/linux/uaccess.h copy_in_user(void __user *to, const void __user *from, unsigned long n) from 160 include/linux/uaccess.h if (access_ok(to, n) && access_ok(from, n)) from 161 include/linux/uaccess.h n = raw_copy_in_user(to, from, n); from 226 include/linux/uaccess.h __copy_from_user_inatomic_nocache(void *to, const void __user *from, from 229 include/linux/uaccess.h return __copy_from_user_inatomic(to, from, n); from 234 include/linux/uaccess.h extern __must_check int check_zeroed_user(const void __user *from, size_t size); from 123 include/linux/uidgid.h extern kuid_t make_kuid(struct user_namespace *from, uid_t uid); from 124 include/linux/uidgid.h extern kgid_t make_kgid(struct user_namespace *from, gid_t gid); from 143 include/linux/uidgid.h static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid) from 148 include/linux/uidgid.h static inline kgid_t make_kgid(struct user_namespace *from, gid_t gid) from 66 include/linux/userfaultfd_k.h unsigned long from, unsigned long to, from 120 include/linux/userfaultfd_k.h unsigned long from, from 343 include/linux/visorbus.h struct visor_device *from); from 105 include/linux/zorro.h struct zorro_dev *from); from 145 include/media/v4l2-rect.h const struct v4l2_rect *from, from 148 include/media/v4l2-rect.h if (from->width == 0 || from->height == 0) { from 152 include/media/v4l2-rect.h r->left = (((r->left - from->left) * to->width) / from->width) & ~1; from 153 include/media/v4l2-rect.h r->width = ((r->width * to->width) 
/ from->width) & ~1; from 154 include/media/v4l2-rect.h r->top = ((r->top - from->top) * to->height) / from->height; from 155 include/media/v4l2-rect.h r->height = (r->height * to->height) / from->height; from 203 include/net/9p/client.h int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err); from 124 include/net/checksum.h static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) from 126 include/net/checksum.h __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); from 144 include/net/checksum.h __be32 from, __be32 to, bool pseudohdr); from 146 include/net/checksum.h const __be32 *from, const __be32 *to, from 152 include/net/checksum.h __be16 from, __be16 to, from 155 include/net/checksum.h inet_proto_csum_replace4(sum, skb, (__force __be32)from, from 137 include/net/inet_ecn.h __be32 from, to; from 142 include/net/inet_ecn.h from = *(__be32 *)iph; from 143 include/net/inet_ecn.h to = from | htonl(INET_ECN_CE << 20); from 146 include/net/inet_ecn.h skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from), from 153 include/net/inet_ecn.h __be32 from, to; from 158 include/net/inet_ecn.h from = *(__be32 *)iph; from 159 include/net/inet_ecn.h to = from ^ htonl(INET_ECN_MASK << 20); from 162 include/net/inet_ecn.h skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from), from 211 include/net/ip.h int getfrag(void *from, char *to, int offset, int len, from 213 include/net/ip.h void *from, int len, int protolen, from 217 include/net/ip.h int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, from 228 include/net/ip.h int getfrag(void *from, char *to, int offset, from 230 include/net/ip.h void *from, int length, int transhdrlen, from 179 include/net/ip6_fib.h struct fib6_info __rcu *from; from 261 include/net/ip6_fib.h struct fib6_info *from; from 269 include/net/ip6_fib.h from = rcu_dereference(rt->from); from 270 include/net/ip6_fib.h if (from) from 271 include/net/ip6_fib.h fib6_get_cookie_safe(from, &cookie); from 434 include/net/ip6_fib.h const struct fib6_info *from; from 438 include/net/ip6_fib.h from = rcu_dereference(rt->from); from 439 include/net/ip6_fib.h if (from) { from 440 include/net/ip6_fib.h *addr = from->fib6_prefsrc.addr; from 472 include/net/ip_tunnels.h const void *from, int len, from 475 include/net/ip_tunnels.h memcpy(ip_tunnel_info_opts(info), from, len); from 519 include/net/ip_tunnels.h const void *from, int len, from 989 include/net/ipv6.h int getfrag(void *from, char *to, int offset, int len, from 991 include/net/ipv6.h void *from, int length, int transhdrlen, from 1005 include/net/ipv6.h int getfrag(void *from, char *to, int offset, from 1007 include/net/ipv6.h void *from, int length, int transhdrlen, from 71 include/net/ping.h int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd, from 670 include/net/sctp/structs.h struct iov_iter *from); from 17 include/net/seg6.h static inline void update_csum_diff4(struct sk_buff *skb, __be32 from, from 20 include/net/seg6.h __be32 diff[] = { ~from, to }; from 25 include/net/seg6.h static inline void update_csum_diff16(struct sk_buff *skb, __be32 *from, from 29 include/net/seg6.h ~from[0], ~from[1], ~from[2], ~from[3], from 1982 include/net/sock.h struct iov_iter *from, char *to, from 1987 include/net/sock.h if (!csum_and_copy_from_iter_full(to, copy, &csum, from)) from 1991 include/net/sock.h if (!copy_from_iter_full_nocache(to, copy, from)) from 1993 include/net/sock.h } else if (!copy_from_iter_full(to, copy, from)) from 
2000 include/net/sock.h struct iov_iter *from, int copy) from 2004 include/net/sock.h err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy), from 2012 include/net/sock.h static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from, from 2019 include/net/sock.h err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off, from 20 include/net/udplite.h static __inline__ int udplite_getfrag(void *from, char *to, int offset, from 23 include/net/udplite.h struct msghdr *msg = from; from 874 include/rdma/uverbs_ioctl.h const void *from, size_t size); from 893 include/rdma/uverbs_ioctl.h size_t idx, const void *from, size_t size); from 908 include/rdma/uverbs_ioctl.h size_t idx, const void *from, size_t size) from 931 include/rdma/uverbs_ioctl.h size_t idx, const void *from, size_t size) from 88 include/sound/pcm_params.h unsigned int from, unsigned int to) from 91 include/sound/pcm_params.h for (i = from; i <= to; i++) from 96 include/sound/pcm_params.h unsigned int from, unsigned int to) from 99 include/sound/pcm_params.h for (i = from; i <= to; i++) from 888 include/trace/events/afs.h enum afs_call_state from, from 892 include/trace/events/afs.h TP_ARGS(call, from, to, ret, remote_abort), from 896 include/trace/events/afs.h __field(enum afs_call_state, from ) from 904 include/trace/events/afs.h __entry->from = from; from 912 include/trace/events/afs.h __entry->from, __entry->to, from 1137 include/trace/events/afs.h __field(loff_t, from ) from 1147 include/trace/events/afs.h __entry->from = fl->fl_start; from 1160 include/trace/events/afs.h __entry->from, __entry->len, __entry->flags) from 562 include/trace/events/block.h sector_t from), from 564 include/trace/events/block.h TP_ARGS(q, bio, dev, from), from 580 include/trace/events/block.h __entry->old_sector = from; from 606 include/trace/events/block.h sector_t from), from 608 include/trace/events/block.h TP_ARGS(q, rq, dev, from), from 625 include/trace/events/block.h __entry->old_sector = from; from 1968 include/trace/events/ext4.h TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to, from 1971 include/trace/events/ext4.h TP_ARGS(inode, from, to, reverse, found, found_blk), from 1976 include/trace/events/ext4.h __field( ext4_lblk_t, from ) from 1986 include/trace/events/ext4.h __entry->from = from; from 1997 include/trace/events/ext4.h (unsigned) __entry->from, (unsigned) __entry->to, from 2060 include/trace/events/ext4.h ext4_lblk_t from, ext4_fsblk_t to, from 2063 include/trace/events/ext4.h TP_ARGS(inode, ex, from, to, pc), from 2068 include/trace/events/ext4.h __field( ext4_lblk_t, from ) from 2081 include/trace/events/ext4.h __entry->from = from; from 2098 include/trace/events/ext4.h (unsigned) __entry->from, from 390 include/trace/events/f2fs.h TP_PROTO(struct inode *inode, u64 from), from 392 include/trace/events/f2fs.h TP_ARGS(inode, from), from 399 include/trace/events/f2fs.h __field(u64, from) from 407 include/trace/events/f2fs.h __entry->from = from; from 415 include/trace/events/f2fs.h (unsigned long long)__entry->from) from 420 include/trace/events/f2fs.h TP_PROTO(struct inode *inode, u64 from), from 422 include/trace/events/f2fs.h TP_ARGS(inode, from) from 434 include/trace/events/f2fs.h TP_PROTO(struct inode *inode, u64 from), from 436 include/trace/events/f2fs.h TP_ARGS(inode, from) from 69 include/trace/events/power.h u32 from, from 80 include/trace/events/power.h from, from 92 include/trace/events/power.h __field(u32, from) from 104 include/trace/events/power.h 
__entry->from = from; from 116 include/trace/events/power.h (unsigned long)__entry->from, from 270 include/uapi/linux/cdrom.h unsigned from; from 394 include/uapi/linux/if_link.h __u32 from; from 13 include/uapi/linux/iso_fs.h #define ISODCL(from, to) (to - from + 1) from 76 include/uapi/linux/nbd.h __be64 from; from 21 include/uapi/linux/netfilter/xt_connbytes.h __aligned_u64 from; /* count to be matched */ from 1171 include/uapi/linux/perf_event.h __u64 from; from 94 include/uapi/linux/userfaultfd.h __u64 from; from 758 include/video/omapfb_dss.h struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from); from 848 include/video/omapfb_dss.h *omap_dss_get_next_device(struct omap_dss_device *from) from 36 init/do_mounts.h int __init rd_load_image(char *from); from 41 init/do_mounts.h static inline int rd_load_image(char *from) { return 0; } from 176 init/do_mounts_rd.c int __init rd_load_image(char *from) from 193 init/do_mounts_rd.c in_fd = ksys_open(from, O_RDONLY, 0); from 230 init/do_mounts_rd.c if (strcmp(from, "/initrd.image") == 0) from 255 init/do_mounts_rd.c in_fd = ksys_open(from, O_RDONLY, 0); from 39 ipc/compat.c struct compat_ipc64_perm __user *from) from 42 ipc/compat.c if (copy_from_user(&v, from, sizeof(v))) from 51 ipc/compat.c struct compat_ipc_perm __user *from) from 54 ipc/compat.c if (copy_from_user(&v, from, sizeof(v))) from 62 ipc/compat.c void to_compat_ipc64_perm(struct compat_ipc64_perm *to, struct ipc64_perm *from) from 64 ipc/compat.c to->key = from->key; from 65 ipc/compat.c to->uid = from->uid; from 66 ipc/compat.c to->gid = from->gid; from 67 ipc/compat.c to->cuid = from->cuid; from 68 ipc/compat.c to->cgid = from->cgid; from 69 ipc/compat.c to->mode = from->mode; from 70 ipc/compat.c to->seq = from->seq; from 73 ipc/compat.c void to_compat_ipc_perm(struct compat_ipc_perm *to, struct ipc64_perm *from) from 75 ipc/compat.c to->key = from->key; from 76 ipc/compat.c SET_UID(to->uid, from->uid); from 77 ipc/compat.c SET_GID(to->gid, from->gid); from 78 ipc/compat.c SET_UID(to->cuid, from->cuid); from 79 ipc/compat.c SET_GID(to->cgid, from->cgid); from 80 ipc/compat.c to->mode = from->mode; from 81 ipc/compat.c to->seq = from->seq; from 183 kernel/bpf/btf.c #define for_each_member_from(i, from, struct_type, member) \ from 184 kernel/bpf/btf.c for (i = from, member = btf_type_member(struct_type) + from; \ from 193 kernel/bpf/btf.c #define for_each_vsi_from(i, from, struct_type, member) \ from 194 kernel/bpf/btf.c for (i = from, member = btf_type_var_secinfo(struct_type) + from; \ from 891 kernel/bpf/core.c static int bpf_jit_blind_insn(const struct bpf_insn *from, from 920 kernel/bpf/core.c if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) from 923 kernel/bpf/core.c if (from->imm == 0 && from 924 kernel/bpf/core.c (from->code == (BPF_ALU | BPF_MOV | BPF_K) || from 925 kernel/bpf/core.c from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { from 926 kernel/bpf/core.c *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg); from 930 kernel/bpf/core.c switch (from->code) { from 940 kernel/bpf/core.c *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); from 942 kernel/bpf/core.c *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX); from 954 kernel/bpf/core.c *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); from 956 kernel/bpf/core.c *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX); from 971 kernel/bpf/core.c off = from->off; from 974 kernel/bpf/core.c *to++ = BPF_ALU64_IMM(BPF_MOV, 
BPF_REG_AX, imm_rnd ^ from->imm); from 976 kernel/bpf/core.c *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); from 991 kernel/bpf/core.c off = from->off; from 994 kernel/bpf/core.c *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); from 996 kernel/bpf/core.c *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, from 1018 kernel/bpf/core.c *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); from 1020 kernel/bpf/core.c *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); from 59 kernel/cgroup/cgroup-v1.c int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) from 73 kernel/cgroup/cgroup-v1.c from_cgrp = task_cgroup_from_root(from, root); from 98 kernel/cgroup/cgroup-v1.c int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) from 119 kernel/cgroup/cgroup-v1.c list_for_each_entry(link, &from->cset_links, cset_link) from 132 kernel/cgroup/cgroup-v1.c css_task_iter_start(&from->self, 0, &it); from 1570 kernel/cgroup/cpuset.c nodemask_t from; from 1580 kernel/cgroup/cpuset.c do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); from 1585 kernel/cgroup/cpuset.c static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, from 1593 kernel/cgroup/cpuset.c mwork->from = *from; from 373 kernel/cgroup/rdma.c argstr.from = value; from 9395 kernel/events/core.c ret = kstrtoul(args[0].from, 0, &filter->offset); from 9401 kernel/events/core.c ret = kstrtoul(args[1].from, 0, &filter->size); from 452 kernel/events/hw_breakpoint.c struct perf_event_attr *from) from 454 kernel/events/hw_breakpoint.c to->bp_addr = from->bp_addr; from 455 kernel/events/hw_breakpoint.c to->bp_type = from->bp_type; from 456 kernel/events/hw_breakpoint.c to->bp_len = from->bp_len; from 457 kernel/events/hw_breakpoint.c to->disabled = from->disabled; from 812 kernel/futex.c static int get_futex_value_locked(u32 *dest, u32 __user *from) from 817 kernel/futex.c ret = __get_user(*dest, from); from 150 kernel/irq/devres.c unsigned int from; from 158 kernel/irq/devres.c irq_free_descs(this->from, this->cnt); from 178 kernel/irq/devres.c int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, from 189 kernel/irq/devres.c base = __irq_alloc_descs(irq, from, cnt, node, owner, affinity); from 195 kernel/irq/devres.c dr->from = base; from 736 kernel/irq/irqdesc.c void irq_free_descs(unsigned int from, unsigned int cnt) from 740 kernel/irq/irqdesc.c if (from >= nr_irqs || (from + cnt) > nr_irqs) from 745 kernel/irq/irqdesc.c free_desc(from + i); from 747 kernel/irq/irqdesc.c bitmap_clear(allocated_irqs, from, cnt); from 766 kernel/irq/irqdesc.c __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, from 775 kernel/irq/irqdesc.c if (from > irq) from 777 kernel/irq/irqdesc.c from = irq; from 784 kernel/irq/irqdesc.c from = arch_dynirq_lower_bound(from); from 790 kernel/irq/irqdesc.c from, cnt, 0); from 845 kernel/irq/irqdesc.c void irq_free_hwirqs(unsigned int from, int cnt) from 849 kernel/irq/irqdesc.c for (i = from, j = cnt; j > 0; i++, j--) { from 853 kernel/irq/irqdesc.c irq_free_descs(from, cnt); from 680 kernel/module.c const void *from, unsigned long size) from 685 kernel/module.c memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); from 756 kernel/module.c const void *from, unsigned long size) from 817 kernel/printk/printk.c static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from) from 824 kernel/printk/printk.c size_t len = iov_iter_count(from); from 
845 kernel/printk/printk.c if (!copy_from_iter_full(buf, len, from)) { from 1137 kernel/relay.c void *from; from 1148 kernel/relay.c from = buf->start + read_start; from 1150 kernel/relay.c if (copy_to_user(buffer, from, avail)) from 710 kernel/sched/core.c int walk_tg_tree_from(struct task_group *from, from 716 kernel/sched/core.c parent = from; from 730 kernel/sched/core.c if (ret || parent == from) from 429 kernel/sched/sched.h extern int walk_tg_tree_from(struct task_group *from, from 3192 kernel/signal.c int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from) from 3195 kernel/signal.c if (copy_to_user(to, from , sizeof(struct kernel_siginfo))) from 3203 kernel/signal.c const siginfo_t __user *from) from 3206 kernel/signal.c char __user *expansion = si_expansion(from); from 3226 kernel/signal.c const siginfo_t __user *from) from 3228 kernel/signal.c if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) from 3231 kernel/signal.c return post_copy_siginfo_from_user(to, from); from 3234 kernel/signal.c int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) from 3236 kernel/signal.c if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) from 3238 kernel/signal.c return post_copy_siginfo_from_user(to, from); from 3243 kernel/signal.c const struct kernel_siginfo *from) from 3246 kernel/signal.c return __copy_siginfo_to_user32(to, from, in_x32_syscall()); from 3249 kernel/signal.c const struct kernel_siginfo *from, bool x32_ABI) from 3255 kernel/signal.c new.si_signo = from->si_signo; from 3256 kernel/signal.c new.si_errno = from->si_errno; from 3257 kernel/signal.c new.si_code = from->si_code; from 3258 kernel/signal.c switch(siginfo_layout(from->si_signo, from->si_code)) { from 3260 kernel/signal.c new.si_pid = from->si_pid; from 3261 kernel/signal.c new.si_uid = from->si_uid; from 3264 kernel/signal.c new.si_tid = from->si_tid; from 3265 kernel/signal.c new.si_overrun = from->si_overrun; from 3266 kernel/signal.c new.si_int = from->si_int; from 3269 kernel/signal.c new.si_band = from->si_band; from 3270 kernel/signal.c new.si_fd = from->si_fd; from 3273 kernel/signal.c new.si_addr = ptr_to_compat(from->si_addr); from 3275 kernel/signal.c new.si_trapno = from->si_trapno; from 3279 kernel/signal.c new.si_addr = ptr_to_compat(from->si_addr); from 3281 kernel/signal.c new.si_trapno = from->si_trapno; from 3283 kernel/signal.c new.si_addr_lsb = from->si_addr_lsb; from 3286 kernel/signal.c new.si_addr = ptr_to_compat(from->si_addr); from 3288 kernel/signal.c new.si_trapno = from->si_trapno; from 3290 kernel/signal.c new.si_lower = ptr_to_compat(from->si_lower); from 3291 kernel/signal.c new.si_upper = ptr_to_compat(from->si_upper); from 3294 kernel/signal.c new.si_addr = ptr_to_compat(from->si_addr); from 3296 kernel/signal.c new.si_trapno = from->si_trapno; from 3298 kernel/signal.c new.si_pkey = from->si_pkey; from 3301 kernel/signal.c new.si_pid = from->si_pid; from 3302 kernel/signal.c new.si_uid = from->si_uid; from 3303 kernel/signal.c new.si_status = from->si_status; from 3306 kernel/signal.c new._sifields._sigchld_x32._utime = from->si_utime; from 3307 kernel/signal.c new._sifields._sigchld_x32._stime = from->si_stime; from 3311 kernel/signal.c new.si_utime = from->si_utime; from 3312 kernel/signal.c new.si_stime = from->si_stime; from 3316 kernel/signal.c new.si_pid = from->si_pid; from 3317 kernel/signal.c new.si_uid = from->si_uid; from 3318 kernel/signal.c new.si_int = from->si_int; from 3321 kernel/signal.c new.si_call_addr = 
ptr_to_compat(from->si_call_addr); from 3322 kernel/signal.c new.si_syscall = from->si_syscall; from 3323 kernel/signal.c new.si_arch = from->si_arch; from 3334 kernel/signal.c const struct compat_siginfo *from) from 3337 kernel/signal.c to->si_signo = from->si_signo; from 3338 kernel/signal.c to->si_errno = from->si_errno; from 3339 kernel/signal.c to->si_code = from->si_code; from 3340 kernel/signal.c switch(siginfo_layout(from->si_signo, from->si_code)) { from 3342 kernel/signal.c to->si_pid = from->si_pid; from 3343 kernel/signal.c to->si_uid = from->si_uid; from 3346 kernel/signal.c to->si_tid = from->si_tid; from 3347 kernel/signal.c to->si_overrun = from->si_overrun; from 3348 kernel/signal.c to->si_int = from->si_int; from 3351 kernel/signal.c to->si_band = from->si_band; from 3352 kernel/signal.c to->si_fd = from->si_fd; from 3355 kernel/signal.c to->si_addr = compat_ptr(from->si_addr); from 3357 kernel/signal.c to->si_trapno = from->si_trapno; from 3361 kernel/signal.c to->si_addr = compat_ptr(from->si_addr); from 3363 kernel/signal.c to->si_trapno = from->si_trapno; from 3365 kernel/signal.c to->si_addr_lsb = from->si_addr_lsb; from 3368 kernel/signal.c to->si_addr = compat_ptr(from->si_addr); from 3370 kernel/signal.c to->si_trapno = from->si_trapno; from 3372 kernel/signal.c to->si_lower = compat_ptr(from->si_lower); from 3373 kernel/signal.c to->si_upper = compat_ptr(from->si_upper); from 3376 kernel/signal.c to->si_addr = compat_ptr(from->si_addr); from 3378 kernel/signal.c to->si_trapno = from->si_trapno; from 3380 kernel/signal.c to->si_pkey = from->si_pkey; from 3383 kernel/signal.c to->si_pid = from->si_pid; from 3384 kernel/signal.c to->si_uid = from->si_uid; from 3385 kernel/signal.c to->si_status = from->si_status; from 3388 kernel/signal.c to->si_utime = from->_sifields._sigchld_x32._utime; from 3389 kernel/signal.c to->si_stime = from->_sifields._sigchld_x32._stime; from 3393 kernel/signal.c to->si_utime = from->si_utime; from 3394 kernel/signal.c to->si_stime = from->si_stime; from 3398 kernel/signal.c to->si_pid = from->si_pid; from 3399 kernel/signal.c to->si_uid = from->si_uid; from 3400 kernel/signal.c to->si_int = from->si_int; from 3403 kernel/signal.c to->si_call_addr = compat_ptr(from->si_call_addr); from 3404 kernel/signal.c to->si_syscall = from->si_syscall; from 3405 kernel/signal.c to->si_arch = from->si_arch; from 3414 kernel/signal.c struct compat_siginfo from; from 3416 kernel/signal.c if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) from 3419 kernel/signal.c from.si_signo = signo; from 3420 kernel/signal.c return post_copy_siginfo_from_user32(to, &from); from 3426 kernel/signal.c struct compat_siginfo from; from 3428 kernel/signal.c if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) from 3431 kernel/signal.c return post_copy_siginfo_from_user32(to, &from); from 710 kernel/softirq.c unsigned int __weak arch_dynirq_lower_bound(unsigned int from) from 712 kernel/softirq.c return from; from 45 kernel/time/clocksource.c clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec) from 54 kernel/time/clocksource.c tmp = ((u64)maxsec * from) >> 32; from 66 kernel/time/clocksource.c tmp += from / 2; from 67 kernel/time/clocksource.c do_div(tmp, from); from 191 kernel/time/tick-common.c int from = tick_do_timer_boot_cpu; from 193 kernel/time/tick-common.c if (from >= 0 && from != cpu) from 194 kernel/time/tick-common.c smp_call_function_single(from, giveup_do_timer, &cpu, 1); from 1023 kernel/trace/blktrace.c 
dev_t dev, sector_t from) from 1037 kernel/trace/blktrace.c r.sector_from = cpu_to_be64(from); from 1061 kernel/trace/blktrace.c sector_t from) from 1075 kernel/trace/blktrace.c r.sector_from = cpu_to_be64(from); from 3375 kernel/workqueue.c const struct workqueue_attrs *from) from 3377 kernel/workqueue.c to->nice = from->nice; from 3378 kernel/workqueue.c cpumask_copy(to->cpumask, from->cpumask); from 3384 kernel/workqueue.c to->no_numa = from->no_numa; from 20 lib/iomap_copy.c const void *from, from 24 lib/iomap_copy.c const u32 *src = from; from 42 lib/iomap_copy.c void __ioread32_copy(void *to, const void __iomem *from, size_t count) from 45 lib/iomap_copy.c const u32 __iomem *src = from; from 64 lib/iomap_copy.c const void *from, from 69 lib/iomap_copy.c const u64 *src = from; from 75 lib/iomap_copy.c __iowrite32_copy(to, from, count * 2); from 138 lib/iov_iter.c static int copyout(void __user *to, const void *from, size_t n) from 141 lib/iov_iter.c kasan_check_read(from, n); from 142 lib/iov_iter.c n = raw_copy_to_user(to, from, n); from 147 lib/iov_iter.c static int copyin(void *to, const void __user *from, size_t n) from 149 lib/iov_iter.c if (access_ok(from, n)) { from 151 lib/iov_iter.c n = raw_copy_from_user(to, from, n); from 162 lib/iov_iter.c void *kaddr, *from; from 179 lib/iov_iter.c from = kaddr + offset; from 182 lib/iov_iter.c left = copyout(buf, from, copy); from 185 lib/iov_iter.c from += copy; from 192 lib/iov_iter.c left = copyout(buf, from, copy); from 195 lib/iov_iter.c from += copy; from 202 lib/iov_iter.c offset = from - kaddr; from 210 lib/iov_iter.c from = kaddr + offset; from 211 lib/iov_iter.c left = copyout(buf, from, copy); from 214 lib/iov_iter.c from += copy; from 220 lib/iov_iter.c left = copyout(buf, from, copy); from 223 lib/iov_iter.c from += copy; from 459 lib/iov_iter.c char *from = kmap_atomic(page); from 460 lib/iov_iter.c memcpy(to, from + offset, len); from 461 lib/iov_iter.c kunmap_atomic(from); from 464 lib/iov_iter.c static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len) from 467 lib/iov_iter.c memcpy(to + offset, from, len); from 565 lib/iov_iter.c static __wsum csum_and_memcpy(void *to, const void *from, size_t len, from 568 lib/iov_iter.c __wsum next = csum_partial_copy_nocheck(from, to, len, 0); from 605 lib/iov_iter.c const char *from = addr; from 611 lib/iov_iter.c copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len), from 613 lib/iov_iter.c (from += v.bv_len) - v.bv_len, v.bv_len), from 614 lib/iov_iter.c memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len) from 622 lib/iov_iter.c static int copyout_mcsafe(void __user *to, const void *from, size_t n) from 625 lib/iov_iter.c kasan_check_read(from, n); from 626 lib/iov_iter.c n = copy_to_user_mcsafe((__force void *) to, from, n); from 632 lib/iov_iter.c const char *from, size_t len) from 638 lib/iov_iter.c ret = memcpy_mcsafe(to + offset, from, len); from 703 lib/iov_iter.c const char *from = addr; from 711 lib/iov_iter.c copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len), from 714 lib/iov_iter.c (from += v.bv_len) - v.bv_len, v.bv_len); from 716 lib/iov_iter.c curr_addr = (unsigned long) from; from 722 lib/iov_iter.c rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, from 725 lib/iov_iter.c curr_addr = (unsigned long) from; from 1490 lib/iov_iter.c const char *from = addr; from 1505 lib/iov_iter.c next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len, from 1516 lib/iov_iter.c (from += 
v.bv_len) - v.bv_len, from 1522 lib/iov_iter.c (from += v.iov_len) - v.iov_len, from 176 lib/kfifo.c const void __user *from, unsigned int len, unsigned int off, from 192 lib/kfifo.c ret = copy_from_user(fifo->data + off, from, l); from 196 lib/kfifo.c ret = copy_from_user(fifo->data, from + l, len - l); from 210 lib/kfifo.c int __kfifo_from_user(struct __kfifo *fifo, const void __user *from, from 225 lib/kfifo.c ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied); from 499 lib/kfifo.c int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from, from 513 lib/kfifo.c ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied); from 56 lib/parser.c args[argc].from = s; from 80 lib/parser.c if (args[argc].to == args[argc].from) from 301 lib/parser.c size_t ret = src->to - src->from; from 305 lib/parser.c memcpy(dest, src->from, len); from 322 lib/parser.c return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL); from 42 lib/test_user_copy.c static bool is_zeroed(void *from, size_t size) from 44 lib/test_user_copy.c return memchr_inv(from, 0x0, size) == NULL; from 8 lib/usercopy.c unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n) from 12 lib/usercopy.c if (likely(access_ok(from, n))) { from 14 lib/usercopy.c res = raw_copy_from_user(to, from, n); from 24 lib/usercopy.c unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) from 28 lib/usercopy.c kasan_check_read(from, n); from 29 lib/usercopy.c n = raw_copy_to_user(to, from, n); from 50 lib/usercopy.c int check_zeroed_user(const void __user *from, size_t size) from 53 lib/usercopy.c uintptr_t align = (uintptr_t) from % sizeof(unsigned long); from 58 lib/usercopy.c from -= align; from 61 lib/usercopy.c if (!user_access_begin(from, size)) from 64 lib/usercopy.c unsafe_get_user(val, (unsigned long __user *) from, err_fault); from 72 lib/usercopy.c from += sizeof(unsigned long); from 75 lib/usercopy.c unsafe_get_user(val, (unsigned long __user *) from, err_fault); from 114 lib/zlib_inflate/inffast.c unsigned char *from; /* where to copy match from */ from 208 lib/zlib_inflate/inffast.c from = window - OFF; from 210 lib/zlib_inflate/inffast.c from += wsize - op; from 214 lib/zlib_inflate/inffast.c PUP(out) = PUP(from); from 216 lib/zlib_inflate/inffast.c from = out - dist; /* rest from output */ from 220 lib/zlib_inflate/inffast.c from += wsize + write - op; from 225 lib/zlib_inflate/inffast.c PUP(out) = PUP(from); from 227 lib/zlib_inflate/inffast.c from = window - OFF; from 232 lib/zlib_inflate/inffast.c PUP(out) = PUP(from); from 234 lib/zlib_inflate/inffast.c from = out - dist; /* rest from output */ from 239 lib/zlib_inflate/inffast.c from += write - op; from 243 lib/zlib_inflate/inffast.c PUP(out) = PUP(from); from 245 lib/zlib_inflate/inffast.c from = out - dist; /* rest from output */ from 249 lib/zlib_inflate/inffast.c PUP(out) = PUP(from); from 250 lib/zlib_inflate/inffast.c PUP(out) = PUP(from); from 251 lib/zlib_inflate/inffast.c PUP(out) = PUP(from); from 255 lib/zlib_inflate/inffast.c PUP(out) = PUP(from); from 257 lib/zlib_inflate/inffast.c PUP(out) = PUP(from); from 264 lib/zlib_inflate/inffast.c from = out - dist; /* copy direct from output */ from 268 lib/zlib_inflate/inffast.c PUP(out) = PUP(from); from 275 lib/zlib_inflate/inffast.c sfrom = (unsigned short *)(from - OFF); from 285 lib/zlib_inflate/inffast.c from = (unsigned char *)sfrom + OFF; from 304 lib/zlib_inflate/inffast.c PUP(out) = PUP(from); from 327 lib/zlib_inflate/inflate.c 
unsigned char *from; /* where to copy match bytes from */ from 665 lib/zlib_inflate/inflate.c from = state->window + (state->wsize - copy); from 668 lib/zlib_inflate/inflate.c from = state->window + (state->write - copy); from 672 lib/zlib_inflate/inflate.c from = put - state->offset; from 679 lib/zlib_inflate/inflate.c *put++ = *from++; from 2938 mm/filemap.c inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) from 2948 mm/filemap.c if (!iov_iter_count(from)) from 2958 mm/filemap.c count = iov_iter_count(from); from 2963 mm/filemap.c iov_iter_truncate(from, count); from 2964 mm/filemap.c return iov_iter_count(from); from 3144 mm/filemap.c generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) from 3154 mm/filemap.c write_len = iov_iter_count(from); from 3187 mm/filemap.c written = mapping->a_ops->direct_IO(iocb, from); from 3215 mm/filemap.c iov_iter_revert(from, write_len - iov_iter_count(from)); from 3348 mm/filemap.c ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 3370 mm/filemap.c written = generic_file_direct_write(iocb, from); from 3378 mm/filemap.c if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) from 3381 mm/filemap.c status = generic_perform_write(file, from, pos = iocb->ki_pos); from 3413 mm/filemap.c written = generic_perform_write(file, from, iocb->ki_pos); from 3436 mm/filemap.c ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) from 3443 mm/filemap.c ret = generic_write_checks(iocb, from); from 3445 mm/filemap.c ret = __generic_file_write_iter(iocb, from); from 244 mm/hugetlb.c long from; from 280 mm/hugetlb.c if (&rg->link == head || t < rg->from) { from 288 mm/hugetlb.c nrg->from = f; from 297 mm/hugetlb.c if (f > rg->from) from 298 mm/hugetlb.c f = rg->from; from 305 mm/hugetlb.c if (rg->from > t) from 318 mm/hugetlb.c add -= (rg->to - rg->from); from 324 mm/hugetlb.c add += (nrg->from - f); /* Added to beginning of region */ from 325 mm/hugetlb.c nrg->from = f; from 401 mm/hugetlb.c if (&rg->link == head || t < rg->from) { from 409 mm/hugetlb.c nrg->from = f; from 421 mm/hugetlb.c if (f > rg->from) from 422 mm/hugetlb.c f = rg->from; from 429 mm/hugetlb.c if (rg->from > t) from 439 mm/hugetlb.c chg -= rg->to - rg->from; from 502 mm/hugetlb.c if (rg->to <= f && (rg->to != rg->from || rg->to != f)) from 505 mm/hugetlb.c if (rg->from >= t) from 508 mm/hugetlb.c if (f > rg->from && t < rg->to) { /* Must split region */ from 533 mm/hugetlb.c nrg->from = t; from 545 mm/hugetlb.c if (f <= rg->from && t >= rg->to) { /* Remove entire region */ from 546 mm/hugetlb.c del += rg->to - rg->from; from 552 mm/hugetlb.c if (f <= rg->from) { /* Trim beginning of region */ from 553 mm/hugetlb.c del += t - rg->from; from 554 mm/hugetlb.c rg->from = t; from 606 mm/hugetlb.c if (rg->from >= t) from 609 mm/hugetlb.c seg_from = max(rg->from, f); from 4646 mm/hugetlb.c long from, long to, from 4657 mm/hugetlb.c if (from > to) { from 4684 mm/hugetlb.c chg = region_chg(resv_map, from, to); from 4691 mm/hugetlb.c chg = to - from; from 4736 mm/hugetlb.c long add = region_add(resv_map, from, to); from 4758 mm/hugetlb.c region_abort(resv_map, from, to); from 73 mm/kasan/quarantine.c static void qlist_move_all(struct qlist_head *from, struct qlist_head *to) from 75 mm/kasan/quarantine.c if (unlikely(qlist_empty(from))) from 79 mm/kasan/quarantine.c *to = *from; from 80 mm/kasan/quarantine.c qlist_init(from); from 84 mm/kasan/quarantine.c to->tail->next = from->head; from 85 mm/kasan/quarantine.c 
to->tail = from->tail; from 86 mm/kasan/quarantine.c to->bytes += from->bytes; from 88 mm/kasan/quarantine.c qlist_init(from); from 264 mm/kasan/quarantine.c static void qlist_move_cache(struct qlist_head *from, from 270 mm/kasan/quarantine.c if (unlikely(qlist_empty(from))) from 273 mm/kasan/quarantine.c curr = from->head; from 274 mm/kasan/quarantine.c qlist_init(from); from 282 mm/kasan/quarantine.c qlist_put(from, curr, obj_cache->size); from 191 mm/memcontrol.c struct mem_cgroup *from; from 1162 mm/memcontrol.c static void __invalidate_reclaim_iterators(struct mem_cgroup *from, from 1171 mm/memcontrol.c mz = mem_cgroup_nodeinfo(from, nid); from 1357 mm/memcontrol.c struct mem_cgroup *from; from 1365 mm/memcontrol.c from = mc.from; from 1367 mm/memcontrol.c if (!from) from 1370 mm/memcontrol.c ret = mem_cgroup_is_descendant(from, memcg) || from 3146 mm/memcontrol.c struct mem_cgroup *from, struct mem_cgroup *to) from 3150 mm/memcontrol.c old_id = mem_cgroup_id(from); from 3154 mm/memcontrol.c mod_memcg_state(from, MEMCG_SWAP, -1); from 3162 mm/memcontrol.c struct mem_cgroup *from, struct mem_cgroup *to) from 5485 mm/memcontrol.c struct mem_cgroup *from, from 5495 mm/memcontrol.c VM_BUG_ON(from == to); from 5508 mm/memcontrol.c if (page->mem_cgroup != from) from 5514 mm/memcontrol.c from_vec = mem_cgroup_lruvec(pgdat, from); from 5517 mm/memcontrol.c spin_lock_irqsave(&from->move_lock, flags); from 5552 mm/memcontrol.c spin_unlock_irqrestore(&from->move_lock, flags); from 5559 mm/memcontrol.c mem_cgroup_charge_statistics(from, page, compound, -nr_pages); from 5560 mm/memcontrol.c memcg_check_events(from, page); from 5616 mm/memcontrol.c if (page->mem_cgroup == mc.from) { from 5631 mm/memcontrol.c mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) { from 5660 mm/memcontrol.c if (page->mem_cgroup == mc.from) { from 5740 mm/memcontrol.c struct mem_cgroup *from = mc.from; from 5753 mm/memcontrol.c cancel_charge(mc.from, mc.moved_charge); from 5759 mm/memcontrol.c if (!mem_cgroup_is_root(mc.from)) from 5760 mm/memcontrol.c page_counter_uncharge(&mc.from->memsw, mc.moved_swap); from 5762 mm/memcontrol.c mem_cgroup_id_put_many(mc.from, mc.moved_swap); from 5776 mm/memcontrol.c memcg_oom_recover(from); from 5792 mm/memcontrol.c mc.from = NULL; from 5804 mm/memcontrol.c struct mem_cgroup *from; from 5838 mm/memcontrol.c from = mem_cgroup_from_task(p); from 5840 mm/memcontrol.c VM_BUG_ON(from == memcg); from 5847 mm/memcontrol.c VM_BUG_ON(mc.from); from 5855 mm/memcontrol.c mc.from = from; from 5899 mm/memcontrol.c mc.from, mc.to)) { from 5909 mm/memcontrol.c mc.from, mc.to)) { from 5948 mm/memcontrol.c mc.from, mc.to)) { from 5960 mm/memcontrol.c if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) { from 6000 mm/memcontrol.c atomic_inc(&mc.from->moving_account); from 6023 mm/memcontrol.c atomic_dec(&mc.from->moving_account); from 1059 mm/mempolicy.c int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, from 1103 mm/mempolicy.c tmp = *from; from 1126 mm/mempolicy.c if ((nodes_weight(*from) != nodes_weight(*to)) && from 1130 mm/mempolicy.c d = node_remap(s, *from, *to); from 1205 mm/mempolicy.c int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, from 1399 mm/migrate.c int migrate_pages(struct list_head *from, new_page_t get_new_page, from 1418 mm/migrate.c list_for_each_entry_safe(page, page2, from, lru) { from 1446 mm/migrate.c rc = split_huge_page_to_list(page, from); from 532 mm/nommu.c static void free_page_series(unsigned long from, unsigned long to) from 
534 mm/nommu.c for (; from < to; from += PAGE_SIZE) { from 535 mm/nommu.c struct page *page = virt_to_page(from); from 1448 mm/nommu.c unsigned long from, unsigned long to) from 1455 mm/nommu.c if (from > vma->vm_start) from 1456 mm/nommu.c vma->vm_end = from; from 1467 mm/nommu.c if (from > region->vm_start) { from 1469 mm/nommu.c region->vm_top = region->vm_end = from; from 1476 mm/nommu.c free_page_series(from, to); from 294 mm/page_io.c struct iov_iter from; from 296 mm/page_io.c iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE); from 302 mm/page_io.c ret = mapping->a_ops->direct_IO(&kiocb, &from); from 2516 mm/shmem.c unsigned from = pos & (PAGE_SIZE - 1); from 2517 mm/shmem.c zero_user_segments(page, 0, from, from 2518 mm/shmem.c from + copied, PAGE_SIZE); from 575 mm/slab.c struct array_cache *from, unsigned int max) from 578 mm/slab.c int nr = min3(from->avail, max, to->limit - to->avail); from 583 mm/slab.c memcpy(to->entry + to->avail, from->entry + from->avail -nr, from 586 mm/slab.c from->avail -= nr; from 727 mm/slub.c void *from, void *to) from 729 mm/slub.c slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data); from 730 mm/slub.c memset(from, data, to - from); from 865 mm/truncate.c void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) from 874 mm/truncate.c if (from >= to || bsize == PAGE_SIZE) from 877 mm/truncate.c rounded_from = round_up(from, bsize); from 881 mm/truncate.c index = from >> PAGE_SHIFT; from 439 mm/vmalloc.c struct rb_root *root, struct rb_node *from, from 452 mm/vmalloc.c link = &from; from 665 mm/vmalloc.c struct rb_node *from, struct rb_root *root, from 671 mm/vmalloc.c if (from) from 672 mm/vmalloc.c link = find_va_links(va, NULL, from, &parent); from 122 net/8021q/vlan_netlink.c vlan_dev_set_ingress_priority(dev, m->to, m->from); from 128 net/8021q/vlan_netlink.c err = vlan_dev_set_egress_priority(dev, m->from, m->to); from 240 net/8021q/vlan_netlink.c m.from = i; from 260 net/8021q/vlan_netlink.c m.from = pm->priority; from 1630 net/9p/client.c p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err) from 1639 net/9p/client.c iov_iter_count(from)); from 1641 net/9p/client.c while (iov_iter_count(from)) { from 1642 net/9p/client.c int count = iov_iter_count(from); from 1652 net/9p/client.c req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0, from 1657 net/9p/client.c offset, rsize, from); from 1678 net/9p/client.c iov_iter_advance(from, count); from 63 net/9p/protocol.c pdu_write_u(struct p9_fcall *pdu, struct iov_iter *from, size_t size) from 66 net/9p/protocol.c struct iov_iter i = *from; from 435 net/9p/protocol.c struct iov_iter *from = from 439 net/9p/protocol.c if (!errcode && pdu_write_u(pdu, from, count)) from 88 net/atm/atm_misc.c void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to) from 90 net/atm/atm_misc.c #define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) from 96 net/atm/atm_misc.c void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to) from 98 net/atm/atm_misc.c #define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) from 161 net/atm/resources.c static void copy_aal_stats(struct k_atm_aal_stats *from, from 164 net/atm/resources.c #define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) from 169 net/atm/resources.c static void subtract_aal_stats(struct k_atm_aal_stats *from, from 172 net/atm/resources.c #define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) from 618 net/batman-adv/main.c unsigned int from; from 625 net/batman-adv/main.c from = 
(unsigned int)(payload_ptr - skb->data); from 627 net/batman-adv/main.c skb_prepare_seq_read(skb, from, to, &st); from 194 net/bpf/test_run.c static inline bool range_is_zero(void *buf, size_t from, size_t to) from 196 net/bpf/test_run.c return !memchr_inv((u8 *)buf + from, 0, to - from); from 547 net/bridge/netfilter/ebtables.c int from; from 773 net/bridge/netfilter/ebtables.c if (cl_s[chain_nr].from != -1) from 775 net/bridge/netfilter/ebtables.c cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries; from 781 net/bridge/netfilter/ebtables.c chain_nr = cl_s[chain_nr].from; from 814 net/bridge/netfilter/ebtables.c cl_s[i].from = chain_nr; from 122 net/caif/cfpkt_skbuff.c u8 *from; from 137 net/caif/cfpkt_skbuff.c from = skb_pull(skb, len); from 138 net/caif/cfpkt_skbuff.c from -= len; from 140 net/caif/cfpkt_skbuff.c memcpy(data, from, len); from 149 net/caif/cfpkt_skbuff.c u8 *from; from 161 net/caif/cfpkt_skbuff.c from = skb_tail_pointer(skb) - len; from 163 net/caif/cfpkt_skbuff.c memcpy(data, from, len); from 261 net/can/gw.c int from = calc_idx(xor->from_idx, cf->len); from 267 net/can/gw.c if (from < 0 || to < 0 || res < 0) from 270 net/can/gw.c if (from <= to) { from 271 net/can/gw.c for (i = from; i <= to; i++) from 274 net/can/gw.c for (i = from; i >= to; i--) from 306 net/can/gw.c int from = calc_idx(crc8->from_idx, cf->len); from 312 net/can/gw.c if (from < 0 || to < 0 || res < 0) from 315 net/can/gw.c if (from <= to) { from 425 net/ceph/ceph_common.c argstr[0].from); from 431 net/ceph/ceph_common.c err = ceph_parse_ips(argstr[0].from, from 441 net/ceph/ceph_common.c err = parse_fsid(argstr[0].from, &opt->fsid); from 447 net/ceph/ceph_common.c opt->name = kstrndup(argstr[0].from, from 448 net/ceph/ceph_common.c argstr[0].to-argstr[0].from, from 464 net/ceph/ceph_common.c err = ceph_crypto_key_unarmor(opt->key, argstr[0].from); from 477 net/ceph/ceph_common.c err = get_secret(opt->key, argstr[0].from); from 2411 net/ceph/osdmap.c int from = pg->pg_upmap_items.from_to[i][0]; from 2425 net/ceph/osdmap.c if (osd == from && pos < 0 && from 552 net/core/datagram.c struct iov_iter *from, from 563 net/core/datagram.c if (copy_from_iter(skb->data + offset, copy, from) != copy) from 585 net/core/datagram.c copy, from); from 607 net/core/datagram.c from, copy)) from 624 net/core/datagram.c struct iov_iter *from, size_t length) from 628 net/core/datagram.c while (length && iov_iter_count(from)) { from 638 net/core/datagram.c copied = iov_iter_get_pages(from, pages, length, from 643 net/core/datagram.c iov_iter_advance(from, copied); from 678 net/core/datagram.c int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from) from 680 net/core/datagram.c int copy = min_t(int, skb_headlen(skb), iov_iter_count(from)); from 683 net/core/datagram.c if (skb_copy_datagram_from_iter(skb, 0, from, copy)) from 686 net/core/datagram.c return __zerocopy_sg_from_iter(NULL, skb, from, ~0U); from 13 net/core/datagram.h struct iov_iter *from, size_t length); from 633 net/core/dev_addr_lists.c int dev_uc_sync(struct net_device *to, struct net_device *from) from 637 net/core/dev_addr_lists.c if (to->addr_len != from->addr_len) from 641 net/core/dev_addr_lists.c err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); from 663 net/core/dev_addr_lists.c int dev_uc_sync_multiple(struct net_device *to, struct net_device *from) from 667 net/core/dev_addr_lists.c if (to->addr_len != from->addr_len) from 671 net/core/dev_addr_lists.c err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len); from 688 
net/core/dev_addr_lists.c void dev_uc_unsync(struct net_device *to, struct net_device *from) from 690 net/core/dev_addr_lists.c if (to->addr_len != from->addr_len) from 693 net/core/dev_addr_lists.c netif_addr_lock_bh(from); from 695 net/core/dev_addr_lists.c __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); from 698 net/core/dev_addr_lists.c netif_addr_unlock_bh(from); from 854 net/core/dev_addr_lists.c int dev_mc_sync(struct net_device *to, struct net_device *from) from 858 net/core/dev_addr_lists.c if (to->addr_len != from->addr_len) from 862 net/core/dev_addr_lists.c err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); from 884 net/core/dev_addr_lists.c int dev_mc_sync_multiple(struct net_device *to, struct net_device *from) from 888 net/core/dev_addr_lists.c if (to->addr_len != from->addr_len) from 892 net/core/dev_addr_lists.c err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len); from 909 net/core/dev_addr_lists.c void dev_mc_unsync(struct net_device *to, struct net_device *from) from 911 net/core/dev_addr_lists.c if (to->addr_len != from->addr_len) from 914 net/core/dev_addr_lists.c netif_addr_lock_bh(from); from 916 net/core/dev_addr_lists.c __hw_addr_unsync(&to->mc, &from->mc, to->addr_len); from 919 net/core/dev_addr_lists.c netif_addr_unlock_bh(from); from 561 net/core/ethtool.c const void __user *from) from 565 net/core/ethtool.c if (copy_from_user(&link_usettings, from, sizeof(link_usettings))) from 588 net/core/ethtool.c const struct ethtool_link_ksettings *from) from 592 net/core/ethtool.c memcpy(&link_usettings.base, &from->base, sizeof(link_usettings)); from 594 net/core/ethtool.c from->link_modes.supported, from 597 net/core/ethtool.c from->link_modes.advertising, from 600 net/core/ethtool.c from->link_modes.lp_advertising, from 1665 net/core/filter.c const void *, from, u32, len, u64, flags) from 1680 net/core/filter.c memcpy(ptr, from, len); from 1874 net/core/filter.c u64, from, u64, to, u64, flags) from 1888 net/core/filter.c if (unlikely(from != 0)) from 1894 net/core/filter.c csum_replace2(ptr, from, to); from 1897 net/core/filter.c csum_replace4(ptr, from, to); from 1918 net/core/filter.c u64, from, u64, to, u64, flags) from 1939 net/core/filter.c if (unlikely(from != 0)) from 1945 net/core/filter.c inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); from 1948 net/core/filter.c inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo); from 1970 net/core/filter.c BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size, from 1990 net/core/filter.c sp->diff[j] = ~from[i]; from 2225 net/core/filter.c u8 *raw, *to, *from; from 2282 net/core/filter.c from = sg_virt(sge); from 2286 net/core/filter.c memcpy(to, from, len); from 2350 net/core/filter.c u8 *raw, *to, *from; from 2395 net/core/filter.c from = sg_virt(psge); from 2398 net/core/filter.c memcpy(raw, from, front); from 2401 net/core/filter.c from += front; from 2404 net/core/filter.c memcpy(to, from, back); from 2574 net/core/filter.c u8 *to, *from; from 2584 net/core/filter.c from = sg_virt(sge); from 2586 net/core/filter.c memcpy(to, from, a); from 2587 net/core/filter.c memcpy(to + a, from + a + pop, b); from 3933 net/core/filter.c const struct bpf_tunnel_key *, from, u32, size, u64, flags) from 3950 net/core/filter.c memcpy(compat, from, size); from 3952 net/core/filter.c from = (const struct bpf_tunnel_key *) compat; from 3958 net/core/filter.c if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || from 3959 net/core/filter.c from->tunnel_ext)) from 3978 net/core/filter.c 
info->key.tun_id = cpu_to_be64(from->tunnel_id); from 3979 net/core/filter.c info->key.tos = from->tunnel_tos; from 3980 net/core/filter.c info->key.ttl = from->tunnel_ttl; from 3984 net/core/filter.c memcpy(&info->key.u.ipv6.dst, from->remote_ipv6, from 3985 net/core/filter.c sizeof(from->remote_ipv6)); from 3986 net/core/filter.c info->key.label = cpu_to_be32(from->tunnel_label) & from 3989 net/core/filter.c info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); from 4006 net/core/filter.c const u8 *, from, u32, size) from 4016 net/core/filter.c ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT); from 5017 net/core/filter.c const void *, from, u32, len) from 5044 net/core/filter.c memcpy(skb->data + offset, from, len); from 2550 net/core/skbuff.c int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) from 2562 net/core/skbuff.c skb_copy_to_linear_data_offset(skb, offset, from, copy); from 2566 net/core/skbuff.c from += copy; from 2588 net/core/skbuff.c memcpy(vaddr + p_off, from + copied, p_len); from 2595 net/core/skbuff.c from += copy; from 2610 net/core/skbuff.c from, copy)) from 2615 net/core/skbuff.c from += copy; from 2903 net/core/skbuff.c skb_zerocopy_headlen(const struct sk_buff *from) from 2907 net/core/skbuff.c if (!from->head_frag || from 2908 net/core/skbuff.c skb_headlen(from) < L1_CACHE_BYTES || from 2909 net/core/skbuff.c skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) from 2910 net/core/skbuff.c hlen = skb_headlen(from); from 2912 net/core/skbuff.c if (skb_has_frag_list(from)) from 2913 net/core/skbuff.c hlen = from->len; from 2938 net/core/skbuff.c skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) from 2946 net/core/skbuff.c BUG_ON(!from->head_frag && !hlen); from 2950 net/core/skbuff.c return skb_copy_bits(from, 0, skb_put(to, len), len); from 2953 net/core/skbuff.c ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); from 2958 net/core/skbuff.c plen = min_t(int, skb_headlen(from), len); from 2960 net/core/skbuff.c page = virt_to_head_page(from->head); from 2961 net/core/skbuff.c offset = from->data - (unsigned char *)page_address(page); from 2973 net/core/skbuff.c if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { from 2974 net/core/skbuff.c skb_tx_error(from); from 2977 net/core/skbuff.c skb_zerocopy_clone(to, from, GFP_ATOMIC); from 2979 net/core/skbuff.c for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { from 2984 net/core/skbuff.c skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; from 3301 net/core/skbuff.c int from, to, merge, todo; from 3312 net/core/skbuff.c from = 0; from 3314 net/core/skbuff.c fragfrom = &skb_shinfo(skb)->frags[from]; from 3333 net/core/skbuff.c fragfrom = &skb_shinfo(skb)->frags[from]; from 3343 net/core/skbuff.c from++; from 3348 net/core/skbuff.c (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) from 3354 net/core/skbuff.c while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { from 3358 net/core/skbuff.c fragfrom = &skb_shinfo(skb)->frags[from]; from 3364 net/core/skbuff.c from++; from 3395 net/core/skbuff.c while (from < skb_shinfo(skb)->nr_frags) from 3396 net/core/skbuff.c skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; from 3429 net/core/skbuff.c void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, from 3432 net/core/skbuff.c st->lower_offset = from; from 3572 net/core/skbuff.c unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, from 3581 net/core/skbuff.c skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); 
from 3584 net/core/skbuff.c return (ret <= to - from ? ret : UINT_MAX); from 5026 net/core/skbuff.c bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, from 5030 net/core/skbuff.c int i, delta, len = from->len; from 5039 net/core/skbuff.c BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); from 5045 net/core/skbuff.c from_shinfo = skb_shinfo(from); from 5048 net/core/skbuff.c if (skb_zcopy(to) || skb_zcopy(from)) from 5051 net/core/skbuff.c if (skb_headlen(from) != 0) { from 5059 net/core/skbuff.c if (skb_head_is_locked(from)) from 5062 net/core/skbuff.c delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); from 5064 net/core/skbuff.c page = virt_to_head_page(from->head); from 5065 net/core/skbuff.c offset = from->data - (unsigned char *)page_address(page); from 5068 net/core/skbuff.c page, offset, skb_headlen(from)); from 5075 net/core/skbuff.c delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); from 5085 net/core/skbuff.c if (!skb_cloned(from)) from 298 net/core/skmsg.c int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from, from 315 net/core/skmsg.c copied = iov_iter_get_pages(from, pages, bytes, maxpages, from 322 net/core/skmsg.c iov_iter_advance(from, copied); from 351 net/core/skmsg.c iov_iter_revert(from, msg->sg.size - orig); from 356 net/core/skmsg.c int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from, from 380 net/core/skmsg.c ret = copy_from_iter_nocache(to, copy, from); from 382 net/core/skmsg.c ret = copy_from_iter(to, copy, from); from 426 net/core/utils.c __be32 from, __be32 to, bool pseudohdr) from 429 net/core/utils.c csum_replace4(sum, from, to); from 432 net/core/utils.c (__force __wsum)from), from 436 net/core/utils.c (__force __wsum)from), from 459 net/core/utils.c const __be32 *from, const __be32 *to, from 463 net/core/utils.c ~from[0], ~from[1], ~from[2], ~from[3], from 550 net/dccp/feat.c int dccp_feat_clone_list(struct list_head const *from, struct list_head *to) from 555 net/dccp/feat.c list_for_each_entry(entry, from, node) { from 405 net/dccp/options.c const unsigned char *tail, *from; from 430 net/dccp/options.c from = av->av_buf + av->av_buf_head; from 449 net/dccp/options.c if (from + copylen > tail) { from 450 net/dccp/options.c const u16 tailsize = tail - from; from 452 net/dccp/options.c memcpy(to, from, tailsize); from 456 net/dccp/options.c from = av->av_buf; from 459 net/dccp/options.c memcpy(to, from, copylen); from 460 net/dccp/options.c from += copylen; from 347 net/ipv4/icmp.c static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd, from 350 net/ipv4/icmp.c struct icmp_bxm *icmp_param = (struct icmp_bxm *)from; from 544 net/ipv4/ip_output.c static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) from 546 net/ipv4/ip_output.c to->pkt_type = from->pkt_type; from 547 net/ipv4/ip_output.c to->priority = from->priority; from 548 net/ipv4/ip_output.c to->protocol = from->protocol; from 549 net/ipv4/ip_output.c to->skb_iif = from->skb_iif; from 551 net/ipv4/ip_output.c skb_dst_copy(to, from); from 552 net/ipv4/ip_output.c to->dev = from->dev; from 553 net/ipv4/ip_output.c to->mark = from->mark; from 555 net/ipv4/ip_output.c skb_copy_hash(to, from); from 558 net/ipv4/ip_output.c to->tc_index = from->tc_index; from 560 net/ipv4/ip_output.c nf_copy(to, from); from 561 net/ipv4/ip_output.c skb_ext_copy(to, from); from 563 net/ipv4/ip_output.c to->ipvs_property = from->ipvs_property; from 565 net/ipv4/ip_output.c skb_copy_secmark(to, from); from 666 
net/ipv4/ip_output.c static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to, from 670 net/ipv4/ip_output.c IPCB(to)->flags = IPCB(from)->flags; from 679 net/ipv4/ip_output.c ip_options_fragment(from); from 925 net/ipv4/ip_output.c ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) from 927 net/ipv4/ip_output.c struct msghdr *msg = from; from 958 net/ipv4/ip_output.c int getfrag(void *from, char *to, int offset, from 960 net/ipv4/ip_output.c void *from, int length, int transhdrlen, from 1133 net/ipv4/ip_output.c if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { from 1175 net/ipv4/ip_output.c if (getfrag(from, skb_put(skb, copy), from 1200 net/ipv4/ip_output.c if (getfrag(from, from 1212 net/ipv4/ip_output.c err = skb_zerocopy_iter_dgram(skb, from, copy); from 1297 net/ipv4/ip_output.c int getfrag(void *from, char *to, int offset, int len, from 1299 net/ipv4/ip_output.c void *from, int length, int transhdrlen, from 1319 net/ipv4/ip_output.c from, length, transhdrlen, flags); from 1607 net/ipv4/ip_output.c int getfrag(void *from, char *to, int offset, from 1609 net/ipv4/ip_output.c void *from, int length, int transhdrlen, from 1630 net/ipv4/ip_output.c from, length, transhdrlen, flags); from 63 net/ipv4/netfilter/nf_nat_snmp_basic_main.c __be32 from; from 73 net/ipv4/netfilter/nf_nat_snmp_basic_main.c memcpy(&s[1], &ctx->from, 4); from 83 net/ipv4/netfilter/nf_nat_snmp_basic_main.c memcpy(&s[0], &ctx->from, 4); from 114 net/ipv4/netfilter/nf_nat_snmp_basic_main.c if (*pdata == ctx->from) { from 116 net/ipv4/netfilter/nf_nat_snmp_basic_main.c (void *)&ctx->from, (void *)&ctx->to); from 136 net/ipv4/netfilter/nf_nat_snmp_basic_main.c ctx.from = ct->tuplehash[dir].tuple.src.u3.ip; from 139 net/ipv4/netfilter/nf_nat_snmp_basic_main.c ctx.from = ct->tuplehash[!dir].tuple.src.u3.ip; from 143 net/ipv4/netfilter/nf_nat_snmp_basic_main.c if (ctx.from == ctx.to) from 597 net/ipv4/ping.c int ping_getfrag(void *from, char *to, from 600 net/ipv4/ping.c struct pingfakehdr *pfh = (struct pingfakehdr *)from; from 467 net/ipv4/raw.c static int raw_getfrag(void *from, char *to, int offset, int len, int odd, from 470 net/ipv4/raw.c struct raw_frag_vec *rfv = from; from 37 net/ipv4/raw_diag.c static struct sock *raw_lookup(struct net *net, struct sock *from, from 44 net/ipv4/raw_diag.c sk = __raw_v4_lookup(net, from, r->sdiag_raw_protocol, from 50 net/ipv4/raw_diag.c sk = __raw_v6_lookup(net, from, r->sdiag_raw_protocol, from 1378 net/ipv4/tcp_input.c int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, from 1390 net/ipv4/tcp_input.c return skb_shift(to, from, shiftlen); from 4417 net/ipv4/tcp_input.c struct sk_buff *from, from 4425 net/ipv4/tcp_input.c if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) from 4429 net/ipv4/tcp_input.c if (from->decrypted != to->decrypted) from 4433 net/ipv4/tcp_input.c if (!skb_try_coalesce(to, from, fragstolen, &delta)) from 4439 net/ipv4/tcp_input.c TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; from 4440 net/ipv4/tcp_input.c TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; from 4441 net/ipv4/tcp_input.c TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags; from 4443 net/ipv4/tcp_input.c if (TCP_SKB_CB(from)->has_rxtstamp) { from 4445 net/ipv4/tcp_input.c to->tstamp = from->tstamp; from 4446 net/ipv4/tcp_input.c skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp; from 4454 net/ipv4/tcp_input.c struct sk_buff *from, from 4457 net/ipv4/tcp_input.c bool res = 
tcp_try_coalesce(sk, to, from, fragstolen); from 4462 net/ipv4/tcp_input.c max_t(u16, 1, skb_shinfo(from)->gso_segs); from 327 net/ipv6/exthdrs.c __be32 from, to; from 337 net/ipv6/exthdrs.c from = *(__be32 *)hdr; from 344 net/ipv6/exthdrs.c update_csum_diff4(skb, from, to); from 294 net/ipv6/icmp.c static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) from 296 net/ipv6/icmp.c struct icmpv6_msg *msg = (struct icmpv6_msg *) from; from 84 net/ipv6/ila/ila.h static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to) from 87 net/ipv6/ila/ila.h ~from[0], ~from[1], to[0], to[1], from 924 net/ipv6/ip6_fib.c if (pcpu_rt && rcu_access_pointer(pcpu_rt->from) == match) { from 925 net/ipv6/ip6_fib.c struct fib6_info *from; from 927 net/ipv6/ip6_fib.c from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL); from 928 net/ipv6/ip6_fib.c fib6_info_release(from); from 934 net/ipv6/ip6_fib.c struct fib6_info *from; from 942 net/ipv6/ip6_fib.c __fib6_drop_pcpu_from(nh, arg->from, arg->table); from 957 net/ipv6/ip6_fib.c .from = f6i, from 579 net/ipv6/ip6_output.c static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) from 581 net/ipv6/ip6_output.c to->pkt_type = from->pkt_type; from 582 net/ipv6/ip6_output.c to->priority = from->priority; from 583 net/ipv6/ip6_output.c to->protocol = from->protocol; from 585 net/ipv6/ip6_output.c skb_dst_set(to, dst_clone(skb_dst(from))); from 586 net/ipv6/ip6_output.c to->dev = from->dev; from 587 net/ipv6/ip6_output.c to->mark = from->mark; from 589 net/ipv6/ip6_output.c skb_copy_hash(to, from); from 592 net/ipv6/ip6_output.c to->tc_index = from->tc_index; from 594 net/ipv6/ip6_output.c nf_copy(to, from); from 595 net/ipv6/ip6_output.c skb_ext_copy(to, from); from 596 net/ipv6/ip6_output.c skb_copy_secmark(to, from); from 1020 net/ipv6/ip6_output.c struct fib6_info *from; from 1029 net/ipv6/ip6_output.c from = rt ? 
rcu_dereference(rt->from) : NULL; from 1030 net/ipv6/route.c err = ip6_route_get_saddr(net, from, &fl6->daddr, from 1318 net/ipv6/ip6_output.c int getfrag(void *from, char *to, int offset, from 1320 net/ipv6/ip6_output.c void *from, int length, int transhdrlen, from 1553 net/ipv6/ip6_output.c getfrag(from, data + transhdrlen, offset, from 1596 net/ipv6/ip6_output.c if (getfrag(from, skb_put(skb, copy), from 1621 net/ipv6/ip6_output.c if (getfrag(from, from 1633 net/ipv6/ip6_output.c err = skb_zerocopy_iter_dgram(skb, from, copy); from 1657 net/ipv6/ip6_output.c int getfrag(void *from, char *to, int offset, int len, from 1659 net/ipv6/ip6_output.c void *from, int length, int transhdrlen, from 1689 net/ipv6/ip6_output.c from, length, transhdrlen, flags, ipc6); from 1844 net/ipv6/ip6_output.c int getfrag(void *from, char *to, int offset, from 1846 net/ipv6/ip6_output.c void *from, int length, int transhdrlen, from 1875 net/ipv6/ip6_output.c &current->task_frag, getfrag, from, from 735 net/ipv6/raw.c static int raw6_getfrag(void *from, char *to, int offset, int len, int odd, from 738 net/ipv6/raw.c struct raw6_frag_vec *rfv = from; from 370 net/ipv6/route.c struct fib6_info *from; from 382 net/ipv6/route.c from = xchg((__force struct fib6_info **)&rt->from, NULL); from 383 net/ipv6/route.c fib6_info_release(from); from 413 net/ipv6/route.c struct fib6_info *from; from 415 net/ipv6/route.c from = rcu_dereference(rt->from); from 420 net/ipv6/route.c } else if (from) { from 422 net/ipv6/route.c fib6_check_expired(from); from 1122 net/ipv6/route.c static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) from 1125 net/ipv6/route.c rcu_assign_pointer(rt->from, from); from 1126 net/ipv6/route.c ip_dst_init_metrics(&rt->dst, from->fib6_metrics); from 1440 net/ipv6/route.c struct fib6_info *from; from 1442 net/ipv6/route.c from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL); from 1443 net/ipv6/route.c fib6_info_release(from); from 1459 net/ipv6/route.c struct fib6_info *from; from 1471 net/ipv6/route.c from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL); from 1472 net/ipv6/route.c fib6_info_release(from); from 1738 net/ipv6/route.c static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from) from 1752 net/ipv6/route.c if (!from) from 1757 net/ipv6/route.c if (!from || from 1758 net/ipv6/route.c rcu_access_pointer(rt6_ex->rt6i->from) == from) from 1761 net/ipv6/route.c WARN_ON_ONCE(!from && bucket->depth); from 1889 net/ipv6/route.c struct fib6_info *from; from 1891 net/ipv6/route.c from = rcu_dereference(rt->from); from 1892 net/ipv6/route.c if (!from || !(rt->rt6i_flags & RTF_CACHE)) from 1895 net/ipv6/route.c if (from->nh) { from 1898 net/ipv6/route.c .plen = from->fib6_src.plen from 1903 net/ipv6/route.c rc = nexthop_for_each_fib6_nh(from->nh, from 1909 net/ipv6/route.c return fib6_nh_remove_exception(from->fib6_nh, from 1910 net/ipv6/route.c from->fib6_src.plen, rt); from 1964 net/ipv6/route.c struct fib6_info *from; from 1969 net/ipv6/route.c from = rcu_dereference(rt->from); from 1970 net/ipv6/route.c if (!from || !(rt->rt6i_flags & RTF_CACHE)) from 1973 net/ipv6/route.c if (from->nh) { from 1979 net/ipv6/route.c nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg); from 1985 net/ipv6/route.c fib6_nh = from->fib6_nh; from 1987 net/ipv6/route.c fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt); from 2589 net/ipv6/route.c struct fib6_info *from, from 2594 net/ipv6/route.c if (!from || !fib6_get_cookie_safe(from, &rt_cookie)
|| from 2605 net/ipv6/route.c struct fib6_info *from, from 2610 net/ipv6/route.c fib6_check(from, cookie)) from 2619 net/ipv6/route.c struct fib6_info *from; from 2634 net/ipv6/route.c from = rcu_dereference(rt->from); from 2636 net/ipv6/route.c if (from && (rt->rt6i_flags & RTF_PCPU || from 2638 net/ipv6/route.c dst_ret = rt6_dst_from_check(rt, from, cookie); from 2640 net/ipv6/route.c dst_ret = rt6_check(rt, from, cookie); from 2679 net/ipv6/route.c struct fib6_info *from; from 2682 net/ipv6/route.c from = rcu_dereference(rt->from); from 2683 net/ipv6/route.c if (from) { from 2684 net/ipv6/route.c fn = rcu_dereference(from->fib6_node); from 2696 net/ipv6/route.c struct fib6_info *from; from 2699 net/ipv6/route.c from = rcu_dereference(rt0->from); from 2700 net/ipv6/route.c if (from) from 2701 net/ipv6/route.c rt0->dst.expires = from->expires; from 2721 net/ipv6/route.c (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from)); from 2764 net/ipv6/route.c res.f6i = rcu_dereference(rt6->from); from 4066 net/ipv6/route.c res.f6i = rcu_dereference(rt->from); from 5802 net/ipv6/route.c struct fib6_info *from; from 5913 net/ipv6/route.c from = rcu_dereference(rt->from); from 5914 net/ipv6/route.c if (from) { from 5916 net/ipv6/route.c err = rt6_fill_node(net, skb, from, NULL, NULL, NULL, from 5921 net/ipv6/route.c err = rt6_fill_node(net, skb, from, dst, &fl6.daddr, from 1560 net/mac80211/trace.h #define SWITCH_ENTRY_ASSIGN(to, from) local_vifs[i].to = vifs[i].from from 1216 net/netfilter/ipset/ip_set_core.c struct ip_set *from, *to; from 1225 net/netfilter/ipset/ip_set_core.c from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), from 1227 net/netfilter/ipset/ip_set_core.c if (!from) from 1239 net/netfilter/ipset/ip_set_core.c if (!(from->type->features == to->type->features && from 1240 net/netfilter/ipset/ip_set_core.c from->family == to->family)) from 1245 net/netfilter/ipset/ip_set_core.c if (from->ref_netlink || to->ref_netlink) { from 1250 net/netfilter/ipset/ip_set_core.c strncpy(from_name, from->name, IPSET_MAXNAMELEN); from 1251 net/netfilter/ipset/ip_set_core.c strncpy(from->name, to->name, IPSET_MAXNAMELEN); from 1254 net/netfilter/ipset/ip_set_core.c swap(from->ref, to->ref); from 1256 net/netfilter/ipset/ip_set_core.c ip_set(inst, to_id) = from; from 172 net/netfilter/ipset/pfxlen.c ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr) from 178 net/netfilter/ipset/pfxlen.c if ((from & ip_set_hostmask(i)) != from) from 180 net/netfilter/ipset/pfxlen.c last = from | ~ip_set_hostmask(i); from 187 net/netfilter/ipset/pfxlen.c return from; from 254 net/netfilter/ipvs/ip_vs_ftp.c union nf_inet_addr from; from 284 net/netfilter/ipvs/ip_vs_ftp.c &from, &port, cp->af, from 289 net/netfilter/ipvs/ip_vs_ftp.c &from.ip, ntohs(port), &cp->caddr.ip, 0); from 300 net/netfilter/ipvs/ip_vs_ftp.c from = cp->daddr; from 305 net/netfilter/ipvs/ip_vs_ftp.c &from, &port, cp->af, from 310 net/netfilter/ipvs/ip_vs_ftp.c IP_VS_DBG_ADDR(cp->af, &from), ntohs(port), from 321 net/netfilter/ipvs/ip_vs_ftp.c ipvsh->protocol, &from, port, from 331 net/netfilter/ipvs/ip_vs_ftp.c n_cp = ip_vs_conn_new(&p, cp->af, &from, port, from 344 net/netfilter/ipvs/ip_vs_ftp.c from.ip = n_cp->vaddr.ip; from 347 net/netfilter/ipvs/ip_vs_ftp.c ((unsigned char *)&from.ip)[0], from 348 net/netfilter/ipvs/ip_vs_ftp.c ((unsigned char *)&from.ip)[1], from 349 net/netfilter/ipvs/ip_vs_ftp.c ((unsigned char *)&from.ip)[2], from 350 net/netfilter/ipvs/ip_vs_ftp.c ((unsigned char *)&from.ip)[3], from 354 
net/netfilter/ipvs/ip_vs_ftp.c from = n_cp->vaddr; from 92 net/netfilter/xt_connbytes.c if (sinfo->count.to >= sinfo->count.from) from 93 net/netfilter/xt_connbytes.c return what <= sinfo->count.to && what >= sinfo->count.from; from 95 net/netfilter/xt_connbytes.c return what < sinfo->count.to || what > sinfo->count.from; from 139 net/netfilter/xt_hashlimit.c cfg_copy(struct hashlimit_cfg3 *to, const void *from, int revision) from 142 net/netfilter/xt_hashlimit.c struct hashlimit_cfg1 *cfg = (struct hashlimit_cfg1 *)from; from 154 net/netfilter/xt_hashlimit.c struct hashlimit_cfg2 *cfg = (struct hashlimit_cfg2 *)from; from 166 net/netfilter/xt_hashlimit.c memcpy(to, from, sizeof(struct hashlimit_cfg3)); from 2938 net/openvswitch/flow_netlink.c static int copy_action(const struct nlattr *from, from 2941 net/openvswitch/flow_netlink.c int totlen = NLA_ALIGN(from->nla_len); from 2944 net/openvswitch/flow_netlink.c to = reserve_sfa_size(sfa, from->nla_len, log); from 2948 net/openvswitch/flow_netlink.c memcpy(to, from, totlen); from 132 net/qrtr/qrtr.c int type, struct sockaddr_qrtr *from, from 135 net/qrtr/qrtr.c int type, struct sockaddr_qrtr *from, from 176 net/qrtr/qrtr.c int type, struct sockaddr_qrtr *from, from 186 net/qrtr/qrtr.c hdr->src_node_id = cpu_to_le32(from->sq_node); from 187 net/qrtr/qrtr.c hdr->src_port_id = cpu_to_le32(from->sq_port); from 670 net/qrtr/qrtr.c int type, struct sockaddr_qrtr *from, from 683 net/qrtr/qrtr.c cb->src_node = from->sq_node; from 684 net/qrtr/qrtr.c cb->src_port = from->sq_port; from 699 net/qrtr/qrtr.c int type, struct sockaddr_qrtr *from, from 710 net/qrtr/qrtr.c qrtr_node_enqueue(node, skbn, type, from, to); from 714 net/qrtr/qrtr.c qrtr_local_enqueue(NULL, skb, type, from, to); from 75 net/qrtr/tun.c static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from) from 79 net/qrtr/tun.c size_t len = iov_iter_count(from); from 87 net/qrtr/tun.c if (!copy_from_iter_full(kbuf, len, from)) { from 79 net/rds/ib_recv.c static void list_splice_entire_tail(struct list_head *from, from 82 net/rds/ib_recv.c struct list_head *from_last = from->prev; from 363 net/rds/message.c static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *from) from 367 net/rds/message.c int length = iov_iter_count(from); from 371 net/rds/message.c rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from)); from 388 net/rds/message.c while (iov_iter_count(from)) { from 393 net/rds/message.c copied = iov_iter_get_pages(from, &pages, PAGE_SIZE, from 407 net/rds/message.c iov_iter_advance(from, copied); from 421 net/rds/message.c int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from, from 429 net/rds/message.c rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from)); from 436 net/rds/message.c return rds_message_zcopy_from_user(rm, from); from 438 net/rds/message.c while (iov_iter_count(from)) { from 440 net/rds/message.c ret = rds_page_remainder_alloc(sg, iov_iter_count(from), from 448 net/rds/message.c to_copy = min_t(unsigned long, iov_iter_count(from), from 453 net/rds/message.c to_copy, from); from 853 net/rds/rds.h int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from, from 642 net/rxrpc/af_rxrpc.c rx->service_upgrade.from != 0) from 657 net/rxrpc/af_rxrpc.c rx->service_upgrade.from = service_upgrade[0]; from 154 net/rxrpc/ar-internal.h u16 from; /* Service ID to upgrade (if not 0) */ from 177 net/rxrpc/conn_service.c conn->service_id == rx->service_upgrade.from) from 32 net/sched/em_text.c int 
from, to; from 34 net/sched/em_text.c from = tcf_get_base_ptr(skb, tm->from_layer) - skb->data; from 35 net/sched/em_text.c from += tm->from_offset; from 40 net/sched/em_text.c return skb_find_text(skb, from, to, tm->config) != UINT_MAX; from 736 net/sched/sch_qfq.c static inline unsigned long mask_from(unsigned long bitmap, int from) from 738 net/sched/sch_qfq.c return bitmap & ~((1UL << from) - 1); from 150 net/sctp/chunk.c struct iov_iter *from) from 153 net/sctp/chunk.c size_t msg_len = iov_iter_count(from); from 271 net/sctp/chunk.c err = sctp_user_addto_chunk(chunk, len, from); from 101 net/sctp/diag.c struct sctp_transport *from; from 111 net/sctp/diag.c list_for_each_entry(from, &asoc->peer.transport_addr_list, from 113 net/sctp/diag.c memcpy(info, &from->ipaddr, sizeof(from->ipaddr)); from 114 net/sctp/diag.c memset(info + sizeof(from->ipaddr), 0, from 115 net/sctp/diag.c addrlen - sizeof(from->ipaddr)); from 1512 net/sctp/sm_make_chunk.c struct iov_iter *from) from 1520 net/sctp/sm_make_chunk.c if (!copy_from_iter_full(target, len, from)) from 6168 net/sctp/socket.c struct sctp_transport *from; from 6190 net/sctp/socket.c list_for_each_entry(from, &asoc->peer.transport_addr_list, from 6192 net/sctp/socket.c memcpy(&temp, &from->ipaddr, sizeof(temp)); from 113 net/socket.c static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from); from 972 net/socket.c static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from) from 976 net/socket.c struct msghdr msg = {.msg_iter = *from, from 990 net/socket.c *from = msg.msg_iter; from 1126 net/tipc/link.c u16 from, u16 to, struct sk_buff_head *xmitq) from 1136 net/tipc/link.c if (less(to, from)) from 1139 net/tipc/link.c trace_tipc_link_retrans(r, from, to, &l->transmq); from 1146 net/tipc/link.c if (less(msg_seqno(hdr), from)) from 2166 net/tipc/link.c u16 from = msg_bcast_ack(hdr) + 1; from 2167 net/tipc/link.c u16 to = from + msg_bc_gap(hdr) - 1; from 2189 net/tipc/link.c rc = tipc_link_bc_retrans(snd_l, l, from, to, xmitq); from 2270 net/tipc/link.c u16 from = acked + 1; from 2285 net/tipc/link.c rc = tipc_link_bc_retrans(l->bc_sndlink, l, from, to, xmitq); from 2291 net/tipc/link.c if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from)) from 290 net/tipc/trace.h __field(u16, from) from 299 net/tipc/trace.h __entry->from = f; from 307 net/tipc/trace.h __entry->name, __entry->from, __entry->to, from 320 net/tipc/trace.h __entry->name, __entry->from, __entry->to, from 545 net/tls/tls_sw.c static int tls_split_open_record(struct sock *sk, struct tls_rec *from, from 631 net/tls/tls_sw.c struct tls_rec *from, u32 orig_end) from 633 net/tls/tls_sw.c struct sk_msg *msg_npl = &from->msg_plaintext; from 658 net/tls/tls_sw.c sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted); from 660 net/tls/tls_sw.c kfree(from); from 1324 net/tls/tls_sw.c static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from, from 1343 net/tls/tls_sw.c copied = iov_iter_get_pages(from, pages, from 1351 net/tls/tls_sw.c iov_iter_advance(from, copied); from 1375 net/tls/tls_sw.c iov_iter_revert(from, size - *size_used); from 2843 net/wireless/trace.h MAC_ENTRY(from) from 2850 net/wireless/trace.h MAC_ASSIGN(from, eth_hdr(skb)->h_source); from 2855 net/wireless/trace.h NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from), from 17 net/x25/x25_forward.c int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from, from 41 net/x25/x25_forward.c if (rt->dev == from->dev) { from 67 net/x25/x25_forward.c new_frwd->dev2 = 
from->dev; from 92 net/x25/x25_forward.c int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) { from 106 net/x25/x25_forward.c if (from->dev == frwd->dev1) { from 188 samples/vfs/test-statx.c static void dump_hex(unsigned long long *data, int from, int to) from 192 samples/vfs/test-statx.c from /= 8; from 195 samples/vfs/test-statx.c for (offset = from; offset < to; offset++) { from 1468 scripts/mod/modpost.c const char *from, *from_p; from 1475 scripts/mod/modpost.c get_pretty_name(from_is_func, &from, &from_p); from 1480 scripts/mod/modpost.c modname, fromsec, fromaddr, from, fromsym, from_p, to, tosec, from 1542 scripts/mod/modpost.c from, prl_from, fromsym, from_p, from 1559 scripts/mod/modpost.c from, prl_from, fromsym, from_p, from 1576 scripts/mod/modpost.c from, prl_from, fromsym, from_p, from 1605 scripts/mod/modpost.c Elf_Sym *from; from 1609 scripts/mod/modpost.c from = find_elf_symbol2(elf, r->r_offset, fromsec); from 1610 scripts/mod/modpost.c fromsym = sym_name(elf, from); from 1624 scripts/mod/modpost.c is_function(from), tosec, tosym, from 894 security/integrity/ima/ima_policy.c char *from; from 976 security/integrity/ima/ima_policy.c ima_log_string(ab, "func", args[0].from); from 981 security/integrity/ima/ima_policy.c if (strcmp(args[0].from, "FILE_CHECK") == 0) from 984 security/integrity/ima/ima_policy.c else if (strcmp(args[0].from, "PATH_CHECK") == 0) from 986 security/integrity/ima/ima_policy.c else if (strcmp(args[0].from, "MODULE_CHECK") == 0) from 988 security/integrity/ima/ima_policy.c else if (strcmp(args[0].from, "FIRMWARE_CHECK") == 0) from 990 security/integrity/ima/ima_policy.c else if ((strcmp(args[0].from, "FILE_MMAP") == 0) from 991 security/integrity/ima/ima_policy.c || (strcmp(args[0].from, "MMAP_CHECK") == 0)) from 993 security/integrity/ima/ima_policy.c else if (strcmp(args[0].from, "BPRM_CHECK") == 0) from 995 security/integrity/ima/ima_policy.c else if (strcmp(args[0].from, "CREDS_CHECK") == 0) from 997 security/integrity/ima/ima_policy.c else if (strcmp(args[0].from, "KEXEC_KERNEL_CHECK") == from 1000 security/integrity/ima/ima_policy.c else if (strcmp(args[0].from, "KEXEC_INITRAMFS_CHECK") from 1003 security/integrity/ima/ima_policy.c else if (strcmp(args[0].from, "POLICY_CHECK") == 0) from 1005 security/integrity/ima/ima_policy.c else if (strcmp(args[0].from, "KEXEC_CMDLINE") == 0) from 1013 security/integrity/ima/ima_policy.c ima_log_string(ab, "mask", args[0].from); from 1018 security/integrity/ima/ima_policy.c from = args[0].from; from 1019 security/integrity/ima/ima_policy.c if (*from == '^') from 1020 security/integrity/ima/ima_policy.c from++; from 1022 security/integrity/ima/ima_policy.c if ((strcmp(from, "MAY_EXEC")) == 0) from 1024 security/integrity/ima/ima_policy.c else if (strcmp(from, "MAY_WRITE") == 0) from 1026 security/integrity/ima/ima_policy.c else if (strcmp(from, "MAY_READ") == 0) from 1028 security/integrity/ima/ima_policy.c else if (strcmp(from, "MAY_APPEND") == 0) from 1033 security/integrity/ima/ima_policy.c entry->flags |= (*args[0].from == '^') from 1037 security/integrity/ima/ima_policy.c ima_log_string(ab, "fsmagic", args[0].from); from 1044 security/integrity/ima/ima_policy.c result = kstrtoul(args[0].from, 16, &entry->fsmagic); from 1049 security/integrity/ima/ima_policy.c ima_log_string(ab, "fsname", args[0].from); from 1051 security/integrity/ima/ima_policy.c entry->fsname = kstrdup(args[0].from, GFP_KERNEL); from 1060 security/integrity/ima/ima_policy.c ima_log_string(ab, "fsuuid", 
args[0].from); from 1067 security/integrity/ima/ima_policy.c result = uuid_parse(args[0].from, &entry->fsuuid); from 1087 security/integrity/ima/ima_policy.c args[0].from, entry->uid_op); from 1094 security/integrity/ima/ima_policy.c result = kstrtoul(args[0].from, 10, &lnum); from 1114 security/integrity/ima/ima_policy.c ima_log_string_op(ab, "fowner", args[0].from, from 1122 security/integrity/ima/ima_policy.c result = kstrtoul(args[0].from, 10, &lnum); from 1132 security/integrity/ima/ima_policy.c ima_log_string(ab, "obj_user", args[0].from); from 1138 security/integrity/ima/ima_policy.c ima_log_string(ab, "obj_role", args[0].from); from 1144 security/integrity/ima/ima_policy.c ima_log_string(ab, "obj_type", args[0].from); from 1150 security/integrity/ima/ima_policy.c ima_log_string(ab, "subj_user", args[0].from); from 1156 security/integrity/ima/ima_policy.c ima_log_string(ab, "subj_role", args[0].from); from 1162 security/integrity/ima/ima_policy.c ima_log_string(ab, "subj_type", args[0].from); from 1173 security/integrity/ima/ima_policy.c ima_log_string(ab, "appraise_type", args[0].from); from 1174 security/integrity/ima/ima_policy.c if ((strcmp(args[0].from, "imasig")) == 0) from 1177 security/integrity/ima/ima_policy.c strcmp(args[0].from, "imasig|modsig") == 0) from 1191 security/integrity/ima/ima_policy.c ima_log_string(ab, "pcr", args[0].from); from 1193 security/integrity/ima/ima_policy.c result = kstrtoint(args[0].from, 10, &entry->pcr); from 1201 security/integrity/ima/ima_policy.c ima_log_string(ab, "template", args[0].from); from 1206 security/integrity/ima/ima_policy.c template_desc = lookup_template_desc(args[0].from); from 30 security/keys/compat.c struct iov_iter from; from 38 security/keys/compat.c &from); from 42 security/keys/compat.c ret = keyctl_instantiate_key_common(id, &from, ringid); from 1163 security/keys/keyctl.c struct iov_iter *from, from 1169 security/keys/keyctl.c size_t plen = from ? 
iov_iter_count(from) : 0; from 1176 security/keys/keyctl.c from = NULL; from 1196 security/keys/keyctl.c if (from) { from 1203 security/keys/keyctl.c if (!copy_from_iter_full(payload, plen, from)) from 1246 security/keys/keyctl.c struct iov_iter from; from 1250 security/keys/keyctl.c &iov, &from); from 1254 security/keys/keyctl.c return keyctl_instantiate_key_common(id, &from, ringid); from 1275 security/keys/keyctl.c struct iov_iter from; from 1282 security/keys/keyctl.c ARRAY_SIZE(iovstack), &iov, &from); from 1285 security/keys/keyctl.c ret = keyctl_instantiate_key_common(id, &from, ringid); from 53 security/keys/keyctl_pkey.c q = args[0].from; from 774 security/keys/trusted.c opt->pcrinfo_len = strlen(args[0].from) / 2; from 777 security/keys/trusted.c res = hex2bin(opt->pcrinfo, args[0].from, from 783 security/keys/trusted.c res = kstrtoul(args[0].from, 16, &handle); from 790 security/keys/trusted.c if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) from 792 security/keys/trusted.c res = hex2bin(opt->keyauth, args[0].from, from 798 security/keys/trusted.c if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) from 800 security/keys/trusted.c res = hex2bin(opt->blobauth, args[0].from, from 806 security/keys/trusted.c if (*args[0].from == '0') from 812 security/keys/trusted.c res = kstrtoul(args[0].from, 10, &lock); from 821 security/keys/trusted.c if (!strcmp(args[0].from, hash_algo_name[i])) { from 835 security/keys/trusted.c if (!tpm2 || strlen(args[0].from) != (2 * digest_len)) from 837 security/keys/trusted.c res = hex2bin(opt->policydigest, args[0].from, from 846 security/keys/trusted.c res = kstrtoul(args[0].from, 16, &handle); from 111 security/security.c static void __init append_ordered_lsm(struct lsm_info *lsm, const char *from) from 117 security/security.c if (WARN(last_lsm == LSM_COUNT, "%s: out of LSM slots!?\n", from)) from 125 security/security.c init_debug("%s ordering: %s (%sabled)\n", from, lsm->name, from 678 security/security.c int security_binder_transaction(struct task_struct *from, from 681 security/security.c return call_int_hook(binder_transaction, 0, from, to); from 684 security/security.c int security_binder_transfer_binder(struct task_struct *from, from 687 security/security.c return call_int_hook(binder_transfer_binder, 0, from, to); from 690 security/security.c int security_binder_transfer_file(struct task_struct *from, from 693 security/security.c return call_int_hook(binder_transfer_file, 0, from, to, file); from 2055 security/selinux/hooks.c static int selinux_binder_transaction(struct task_struct *from, from 2059 security/selinux/hooks.c u32 fromsid = task_sid(from); from 2076 security/selinux/hooks.c static int selinux_binder_transfer_binder(struct task_struct *from, from 2079 security/selinux/hooks.c u32 fromsid = task_sid(from); from 2087 security/selinux/hooks.c static int selinux_binder_transfer_file(struct task_struct *from, from 2624 security/selinux/hooks.c char *from = options; from 2630 security/selinux/hooks.c int len = opt_len(from); from 2634 security/selinux/hooks.c token = match_opt_prefix(from, len, &arg); from 2641 security/selinux/hooks.c for (p = q = arg; p < from + len; p++) { from 2659 security/selinux/hooks.c from--; from 2662 security/selinux/hooks.c if (to != from) from 2663 security/selinux/hooks.c memmove(to, from, len); from 2667 security/selinux/hooks.c if (!from[len]) from 2669 security/selinux/hooks.c from += len + 1; from 721 security/smack/smack_lsm.c char *from = options, *to = options; from 725 security/smack/smack_lsm.c 
char *next = strchr(from, ','); from 730 security/smack/smack_lsm.c len = next - from; from 732 security/smack/smack_lsm.c len = strlen(from); from 734 security/smack/smack_lsm.c token = match_opt_prefix(from, len, &arg); from 736 security/smack/smack_lsm.c arg = kmemdup_nul(arg, from + len - arg, GFP_KERNEL); from 747 security/smack/smack_lsm.c from--; from 750 security/smack/smack_lsm.c if (to != from) from 751 security/smack/smack_lsm.c memmove(to, from, len); from 755 security/smack/smack_lsm.c if (!from[len]) from 757 security/smack/smack_lsm.c from += len + 1; from 3146 sound/core/pcm_native.c static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from) from 3163 sound/core/pcm_native.c if (!iter_is_iovec(from)) from 3165 sound/core/pcm_native.c if (from->nr_segs > 128 || from->nr_segs != runtime->channels || from 3166 sound/core/pcm_native.c !frame_aligned(runtime, from->iov->iov_len)) from 3168 sound/core/pcm_native.c frames = bytes_to_samples(runtime, from->iov->iov_len); from 3169 sound/core/pcm_native.c bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL); from 3172 sound/core/pcm_native.c for (i = 0; i < from->nr_segs; ++i) from 3173 sound/core/pcm_native.c bufs[i] = from->iov[i].iov_base; from 1133 sound/pci/cs46xx/dsp_spos_scb_lib.c find_next_free_scb (struct snd_cs46xx * chip, struct dsp_scb_descriptor * from) from 1136 sound/pci/cs46xx/dsp_spos_scb_lib.c struct dsp_scb_descriptor * scb = from; from 104 sound/sh/aica.c static void spu_memload(u32 toi, const void *from, int length) from 107 sound/sh/aica.c const u32 *froml = from; from 382 sound/soc/codecs/mt6358.c static void headset_volume_ramp(struct mt6358_priv *priv, int from, int to) from 386 sound/soc/codecs/mt6358.c if (!is_valid_hp_pga_idx(from) || !is_valid_hp_pga_idx(to)) from 388 sound/soc/codecs/mt6358.c __func__, from, to); from 391 sound/soc/codecs/mt6358.c __func__, from, to); from 393 sound/soc/codecs/mt6358.c if (to > from) from 394 sound/soc/codecs/mt6358.c offset = to - from; from 396 sound/soc/codecs/mt6358.c offset = from - to; from 399 sound/soc/codecs/mt6358.c if (to > from) from 400 sound/soc/codecs/mt6358.c reg_idx = from + count; from 402 sound/soc/codecs/mt6358.c reg_idx = from - count; from 313 sound/soc/codecs/nau8825.c unsigned int value, volume, ramp_up, from, to; from 319 sound/soc/codecs/nau8825.c from = vol_from; from 323 sound/soc/codecs/nau8825.c from = vol_to; from 330 sound/soc/codecs/nau8825.c for (volume = from; volume < to; volume += step) { from 334 sound/soc/codecs/nau8825.c value = to - volume + from; from 343 sound/soc/codecs/nau8825.c value = from; from 394 tools/include/uapi/linux/if_link.h __u32 from; from 1171 tools/include/uapi/linux/perf_event.h __u64 from; from 50 tools/lib/lockdep/include/liblockdep/common.h extern void debug_check_no_locks_freed(const void *from, unsigned long len); from 50 tools/perf/arch/x86/util/intel-bts.c u64 from; from 9 tools/perf/bench/mem-memcpy-x86-64-lib.c unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len); from 11 tools/perf/bench/mem-memcpy-x86-64-lib.c unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len) from 13 tools/perf/bench/mem-memcpy-x86-64-lib.c for (; len; --len, to++, from++) { from 18 tools/perf/bench/mem-memcpy-x86-64-lib.c unsigned long rem = __memcpy_mcsafe(to, from, 1); from 148 tools/perf/builtin-annotate.c process_basic_block(prev, &bi[i].from, &bi[i].flags); from 167 tools/perf/builtin-annotate.c err = addr_map_symbol__inc_samples(&bi->from, sample, evsel); from 54 
tools/perf/builtin-buildid-cache.c char from[PATH_MAX]; from 60 tools/perf/builtin-buildid-cache.c scnprintf(from, sizeof(from), "%s/kallsyms", from_dir); from 64 tools/perf/builtin-buildid-cache.c err = kallsyms__get_function_start(from, name, &addr1); from 81 tools/perf/builtin-buildid-cache.c char from[PATH_MAX]; from 92 tools/perf/builtin-buildid-cache.c scnprintf(from, sizeof(from), "%s/modules", from_dir); from 104 tools/perf/builtin-buildid-cache.c if (!compare_proc_modules(from, to) && from 152 tools/perf/builtin-report.c err = addr_map_symbol__inc_samples(&bi->from, sample, evsel); from 190 tools/perf/builtin-report.c bi->from.addr, bi->to.addr); from 195 tools/perf/builtin-report.c err = addr_map_symbol__inc_samples(&bi->from, sample, evsel); from 739 tools/perf/builtin-script.c u64 i, from, to; from 746 tools/perf/builtin-script.c from = br->entries[i].from; from 752 tools/perf/builtin-script.c thread__find_map_fb(thread, sample->cpumode, from, &alf); from 756 tools/perf/builtin-script.c printed += fprintf(fp, " 0x%"PRIx64, from); from 786 tools/perf/builtin-script.c u64 i, from, to; from 796 tools/perf/builtin-script.c from = br->entries[i].from; from 799 tools/perf/builtin-script.c thread__find_symbol_fb(thread, sample->cpumode, from, &alf); from 831 tools/perf/builtin-script.c u64 i, from, to; from 841 tools/perf/builtin-script.c from = br->entries[i].from; from 844 tools/perf/builtin-script.c if (thread__find_map_fb(thread, sample->cpumode, from, &alf) && from 846 tools/perf/builtin-script.c from = map__map_ip(alf.map, from); from 852 tools/perf/builtin-script.c printed += fprintf(fp, " 0x%"PRIx64, from); from 1034 tools/perf/builtin-script.c len = grab_bb(buffer, br->entries[nr-1].from, from 1035 tools/perf/builtin-script.c br->entries[nr-1].from, from 1038 tools/perf/builtin-script.c printed += ip__fprintf_sym(br->entries[nr - 1].from, thread, from 1040 tools/perf/builtin-script.c printed += ip__fprintf_jump(br->entries[nr - 1].from, &br->entries[nr - 1], from 1043 tools/perf/builtin-script.c printed += print_srccode(thread, x.cpumode, br->entries[nr - 1].from); from 1048 tools/perf/builtin-script.c if (br->entries[i].from || br->entries[i].to) from 1050 tools/perf/builtin-script.c br->entries[i].from, from 1053 tools/perf/builtin-script.c end = br->entries[i].from; from 1058 tools/perf/builtin-script.c end = br->entries[--i].from; from 1095 tools/perf/builtin-script.c if (br->entries[0].from == sample->ip) from 1043 tools/perf/builtin-timechart.c int from = 0, to = 0; from 1053 tools/perf/builtin-timechart.c if (p->pid == we->waker && !from) { from 1054 tools/perf/builtin-timechart.c from = c->Y; from 1066 tools/perf/builtin-timechart.c if (p->pid == we->waker && !from) { from 1067 tools/perf/builtin-timechart.c from = c->Y; from 1091 tools/perf/builtin-timechart.c else if (from && to && abs(from - to) == 1) from 1092 tools/perf/builtin-timechart.c svg_wakeline(we->time, from, to, we->backtrace); from 1094 tools/perf/builtin-timechart.c svg_partial_wakeline(we->time, from, task_from, to, from 12 tools/perf/include/bpf/stdio.h #define puts(from) \ from 13 tools/perf/include/bpf/stdio.h ({ const int __len = sizeof(from); \ from 14 tools/perf/include/bpf/stdio.h char __from[__len] = from; \ from 16 tools/perf/include/bpf/stdio.h &__from, __len & (sizeof(from) - 1)); }) from 152 tools/perf/ui/browsers/annotate.c unsigned int from, to; from 194 tools/perf/ui/browsers/annotate.c from = cursor->al.idx_asm; from 197 tools/perf/ui/browsers/annotate.c from = (u64)cursor->al.idx; 
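Several of the tools/perf entries above (builtin-script.c, builtin-annotate.c, ui/browsers/annotate.c, util/branch.h) treat a branch record as a from/to address pair and walk an array of such pairs. A minimal, self-contained sketch of that pattern in plain C, using a simplified stand-in struct rather than the real perf branch-entry layout (which also carries flag bits), could look like this:

#include <inttypes.h>
#include <stdio.h>

/* Simplified stand-in for a branch record: just the source and
 * destination addresses.  The real perf entry carries flag bits
 * alongside these two fields. */
struct simple_branch {
	uint64_t from;
	uint64_t to;
};

/* Print each branch as "0x<from> -> 0x<to>", mirroring the way
 * builtin-script.c walks br->entries[i].from and .to. */
static void print_branch_stack(const struct simple_branch *entries,
			       unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		printf(" 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
		       entries[i].from, entries[i].to);
}

int main(void)
{
	struct simple_branch stack[] = {
		{ 0xffffffff81000010ULL, 0xffffffff81000200ULL },
		{ 0xffffffff81000204ULL, 0xffffffff81000400ULL },
	};

	print_branch_stack(stack, sizeof(stack) / sizeof(stack[0]));
	return 0;
}

The names simple_branch and print_branch_stack are illustrative only; they do not appear in the perf sources listed above.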
from 206 tools/perf/ui/browsers/annotate.c from, to); from 211 tools/perf/ui/browsers/annotate.c from - 1, from 212 tools/perf/ui/browsers/annotate.c to > from ? true : false); from 3114 tools/perf/ui/browsers/hists.c bi->from.map, from 3115 tools/perf/ui/browsers/hists.c bi->from.sym); from 3116 tools/perf/ui/browsers/hists.c if (bi->to.sym != bi->from.sym) from 19 tools/perf/util/branch.c u64 from, u64 to) from 21 tools/perf/util/branch.c if (flags->type == PERF_BR_UNKNOWN || from == 0) from 27 tools/perf/util/branch.c if (to > from) from 33 tools/perf/util/branch.c if (cross_area(from, to, AREA_2M)) from 35 tools/perf/util/branch.c else if (cross_area(from, to, AREA_4K)) from 27 tools/perf/util/branch.h struct addr_map_symbol from; from 35 tools/perf/util/branch.h u64 from; from 55 tools/perf/util/branch.h u64 from, u64 to); from 14 tools/perf/util/copyfile.c static int slow_copyfile(const char *from, const char *to, struct nsinfo *nsi) from 23 tools/perf/util/copyfile.c from_fp = fopen(from, "r"); from 73 tools/perf/util/copyfile.c static int copyfile_mode_ns(const char *from, const char *to, mode_t mode, from 83 tools/perf/util/copyfile.c err = stat(from, &st); from 105 tools/perf/util/copyfile.c err = slow_copyfile(from, tmp, nsi); from 115 tools/perf/util/copyfile.c fromfd = open(from, O_RDONLY); from 133 tools/perf/util/copyfile.c int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi) from 135 tools/perf/util/copyfile.c return copyfile_mode_ns(from, to, 0755, nsi); from 138 tools/perf/util/copyfile.c int copyfile_mode(const char *from, const char *to, mode_t mode) from 140 tools/perf/util/copyfile.c return copyfile_mode_ns(from, to, mode, NULL); from 143 tools/perf/util/copyfile.c int copyfile(const char *from, const char *to) from 145 tools/perf/util/copyfile.c return copyfile_mode(from, to, 0755); from 11 tools/perf/util/copyfile.h int copyfile(const char *from, const char *to); from 12 tools/perf/util/copyfile.h int copyfile_mode(const char *from, const char *to, mode_t mode); from 13 tools/perf/util/copyfile.h int copyfile_ns(const char *from, const char *to, struct nsinfo *nsi); from 978 tools/perf/util/cs-etm.c be->from = cs_etm__last_executed_instr(tidq->prev_packet); from 1206 tools/perf/util/cs-etm.c .from = sample.ip, from 112 tools/perf/util/hist.c if (h->branch_info->from.sym) { from 113 tools/perf/util/hist.c symlen = (int)h->branch_info->from.sym->namelen + 4; from 118 tools/perf/util/hist.c symlen = dso__name_len(h->branch_info->from.map->dso); from 443 tools/perf/util/hist.c map__get(he->branch_info->from.map); from 492 tools/perf/util/hist.c map__put(he->branch_info->from.map); from 908 tools/perf/util/hist.c if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym)) from 1247 tools/perf/util/hist.c map__zput(he->branch_info->from.map); from 2595 tools/perf/util/hist.c addr_map_symbol__account_cycles(&bi[i].from, from 81 tools/perf/util/intel-bts.c u64 from; from 111 tools/perf/util/intel-bts.c le64_to_cpu(branch->from), from 284 tools/perf/util/intel-bts.c sample.ip = le64_to_cpu(branch->from); from 367 tools/perf/util/intel-bts.c if (!branch->from) { from 379 tools/perf/util/intel-bts.c err = intel_bts_get_next_insn(btsq, branch->from); from 387 tools/perf/util/intel-bts.c branch->from); from 392 tools/perf/util/intel-bts.c if (!machine__kernel_ip(btsq->bts->machine, branch->from) && from 427 tools/perf/util/intel-bts.c if (!branch->from && !branch->to) from 432 tools/perf/util/intel-bts.c le64_to_cpu(branch->from), from 1174 
tools/perf/util/intel-pt.c be->from = state->from_ip; from 1298 tools/perf/util/intel-pt.c .from = sample.ip, from 1680 tools/perf/util/intel-pt.c to = &br_stack->entries[0].from; from 1684 tools/perf/util/intel-pt.c const u64 *from = items->val[i]; from 1686 tools/perf/util/intel-pt.c for (; mask; mask >>= 3, from += 3) { from 1688 tools/perf/util/intel-pt.c *to++ = from[0]; from 1689 tools/perf/util/intel-pt.c *to++ = from[1]; from 1690 tools/perf/util/intel-pt.c *to++ = intel_pt_lbr_flags(from[2]); from 2091 tools/perf/util/machine.c ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from); from 2127 tools/perf/util/machine.c int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ; from 2132 tools/perf/util/machine.c } else if (l[chash[h]].from == l[i].from) { from 2137 tools/perf/util/machine.c if (l[j].from != l[i + off].from) { from 2213 tools/perf/util/machine.c ip = lbr_stack->entries[k].from; from 2221 tools/perf/util/machine.c lbr_stack->entries[0].from; from 2226 tools/perf/util/machine.c ip = lbr_stack->entries[k].from; from 2237 tools/perf/util/machine.c lbr_stack->entries[0].from; from 2346 tools/perf/util/machine.c else if (be[i].from < chain->ips[first_call] && from 2347 tools/perf/util/machine.c be[i].from >= chain->ips[first_call] - 8) from 2361 tools/perf/util/machine.c NULL, be[i].from); from 2365 tools/perf/util/machine.c NULL, be[i].from, from 390 tools/perf/util/map.c struct map *map__clone(struct map *from) from 392 tools/perf/util/map.c struct map *map = memdup(from, sizeof(*map)); from 1285 tools/perf/util/pmu.c void perf_pmu__set_format(unsigned long *bits, long from, long to) from 1290 tools/perf/util/pmu.c to = from; from 1293 tools/perf/util/pmu.c for (b = from; b <= to; b++) from 82 tools/perf/util/pmu.h void perf_pmu__set_format(unsigned long *bits, long from, long to); from 487 tools/perf/util/scripting-engines/trace-event-python.c PyLong_FromUnsignedLongLong(br->entries[i].from)); from 502 tools/perf/util/scripting-engines/trace-event-python.c br->entries[i].from, &al); from 584 tools/perf/util/scripting-engines/trace-event-python.c br->entries[i].from, &al); from 1044 tools/perf/util/session.c (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from); from 1077 tools/perf/util/session.c i, e->from, e->to, from 1086 tools/perf/util/session.c i, i > 0 ? 
e->from : e->to); from 396 tools/perf/util/sort.c left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from); from 399 tools/perf/util/sort.c right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from); from 772 tools/perf/util/sort.c return _sort__dso_cmp(left->branch_info->from.map, from 773 tools/perf/util/sort.c right->branch_info->from.map); from 780 tools/perf/util/sort.c return _hist_entry__dso_snprintf(he->branch_info->from.map, from 794 tools/perf/util/sort.c return dso && (!he->branch_info || !he->branch_info->from.map || from 795 tools/perf/util/sort.c he->branch_info->from.map->dso != dso); from 833 tools/perf/util/sort.c struct addr_map_symbol *from_l = &left->branch_info->from; from 834 tools/perf/util/sort.c struct addr_map_symbol *from_r = &right->branch_info->from; from 839 tools/perf/util/sort.c from_l = &left->branch_info->from; from 840 tools/perf/util/sort.c from_r = &right->branch_info->from; from 869 tools/perf/util/sort.c struct addr_map_symbol *from = &he->branch_info->from; from 871 tools/perf/util/sort.c return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, from 899 tools/perf/util/sort.c return sym && !(he->branch_info && he->branch_info->from.sym && from 900 tools/perf/util/sort.c strstr(he->branch_info->from.sym->name, sym)); from 1257 tools/perf/util/symbol-elf.c static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len) from 1270 tools/perf/util/symbol-elf.c if (lseek(from, from_offs, SEEK_SET) != from_offs) from 1278 tools/perf/util/symbol-elf.c r = read(from, buf, n); from 1371 tools/perf/util/symbol-elf.c static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) from 1374 tools/perf/util/symbol-elf.c GElf_Ehdr *kehdr = &from->ehdr; from 1388 tools/perf/util/symbol-elf.c if (from->elfclass == ELFCLASS32) { from 1768 tools/perf/util/symbol-elf.c static int kcore_copy__compare_fds(int from, int to) from 1783 tools/perf/util/symbol-elf.c ret = read(from, buf_from, page_size); from 1809 tools/perf/util/symbol-elf.c int from, to, err = -1; from 1811 tools/perf/util/symbol-elf.c from = open(from_filename, O_RDONLY); from 1812 tools/perf/util/symbol-elf.c if (from < 0) from 1819 tools/perf/util/symbol-elf.c err = kcore_copy__compare_fds(from, to); from 1823 tools/perf/util/symbol-elf.c close(from); from 1015 tools/perf/util/symbol.c int compare_proc_modules(const char *from, const char *to) from 1023 tools/perf/util/symbol.c if (read_proc_modules(from, &from_modules)) from 220 tools/perf/util/symbol.h int compare_proc_modules(const char *from, const char *to); from 149 tools/testing/selftests/bpf/bpf_helpers.h void *from, unsigned int len) = from 280 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) = from 282 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) = from 284 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) = from 286 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size, int seed) = from 171 tools/testing/selftests/bpf/progs/test_tcp_estats.c static __always_inline void unaligned_u32_set(unsigned char *to, __u8 *from) from 173 tools/testing/selftests/bpf/progs/test_tcp_estats.c to[0] = _(from[0]); from 174 
tools/testing/selftests/bpf/progs/test_tcp_estats.c to[1] = _(from[1]); from 175 tools/testing/selftests/bpf/progs/test_tcp_estats.c to[2] = _(from[2]); from 176 tools/testing/selftests/bpf/progs/test_tcp_estats.c to[3] = _(from[3]); from 672 tools/testing/selftests/bpf/test_align.c static int do_test(unsigned int from, unsigned int to) from 678 tools/testing/selftests/bpf/test_align.c for (i = from; i < to; i++) { from 700 tools/testing/selftests/bpf/test_align.c unsigned int from = 0, to = ARRAY_SIZE(tests); from 707 tools/testing/selftests/bpf/test_align.c from = l; from 714 tools/testing/selftests/bpf/test_align.c from = t; from 718 tools/testing/selftests/bpf/test_align.c return do_test(from, to); from 1099 tools/testing/selftests/bpf/test_verifier.c static int do_test(bool unpriv, unsigned int from, unsigned int to) from 1103 tools/testing/selftests/bpf/test_verifier.c for (i = from; i < to; i++) { from 1137 tools/testing/selftests/bpf/test_verifier.c unsigned int from = 0, to = ARRAY_SIZE(tests); from 1152 tools/testing/selftests/bpf/test_verifier.c from = l; from 1159 tools/testing/selftests/bpf/test_verifier.c from = t; from 1172 tools/testing/selftests/bpf/test_verifier.c return do_test(unpriv, from, to); from 156 tools/testing/selftests/capabilities/test_execve.c int from = openat(fromfd, fromname, O_RDONLY); from 157 tools/testing/selftests/capabilities/test_execve.c if (from == -1) from 164 tools/testing/selftests/capabilities/test_execve.c ssize_t sz = read(from, buf, sizeof(buf)); from 175 tools/testing/selftests/capabilities/test_execve.c close(from); from 54 tools/testing/selftests/powerpc/copyloops/exc_validate.c unsigned long COPY_LOOP(void *to, const void *from, unsigned long size); from 55 tools/testing/selftests/powerpc/copyloops/exc_validate.c unsigned long test_copy_tofrom_user_reference(void *to, const void *from, unsigned long size); from 15 tools/testing/selftests/powerpc/copyloops/validate.c unsigned long COPY_LOOP(void *to, const void *from, unsigned long size); from 30 tools/virtio/linux/uaccess.h static void volatile_memcpy(volatile char *to, const volatile char *from, from 34 tools/virtio/linux/uaccess.h *(to++) = *(from++); from 37 tools/virtio/linux/uaccess.h static inline int copy_from_user(void *to, const void __user volatile *from, from 40 tools/virtio/linux/uaccess.h __chk_user_ptr(from, n); from 41 tools/virtio/linux/uaccess.h volatile_memcpy(to, from, n); from 45 tools/virtio/linux/uaccess.h static inline int copy_to_user(void __user volatile *to, const void *from, from 49 tools/virtio/linux/uaccess.h volatile_memcpy(to, from, n); from 750 virt/kvm/arm/mmu.c int create_hyp_mappings(void *from, void *to, pgprot_t prot) from 754 virt/kvm/arm/mmu.c unsigned long start = kern_hyp_va((unsigned long)from); from 766 virt/kvm/arm/mmu.c phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
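The tools/virtio/linux/uaccess.h entries near the end show a userspace test harness re-implementing copy_from_user()/copy_to_user() as a plain byte-wise copy through volatile pointers. A rough, self-contained sketch of that idea, with illustrative names (volatile_byte_copy and fake_copy_from_user are not the harness's real identifiers):

#include <stdio.h>

/* Byte-wise copy through volatile pointers, in the spirit of the
 * volatile_memcpy() helper listed above; the volatile qualifiers keep
 * the compiler from eliding or merging the individual accesses. */
static void volatile_byte_copy(volatile char *to, const volatile char *from,
			       unsigned long n)
{
	while (n--)
		*to++ = *from++;
}

/* Userspace stand-in for copy_from_user(): there is no real user/kernel
 * boundary here, so the copy always "succeeds" and 0 is returned. */
static int fake_copy_from_user(void *to, const void *from, unsigned long n)
{
	volatile_byte_copy(to, from, n);
	return 0;
}

int main(void)
{
	char src[] = "payload";
	char dst[sizeof(src)];

	if (fake_copy_from_user(dst, src, sizeof(src)) == 0)
		printf("copied: %s\n", dst);
	return 0;
}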